From 0eedc5593efabf78b591276ffc95f67e83875459 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Mon, 19 Feb 2024 16:01:33 +0800 Subject: [PATCH 01/68] feat: CenterCropPad --- .../tensor/tensor.center_crop_pad.md | 42 ++ nodegen/node/center_crop_pad.py | 69 ++ src/operators/tensor/core.cairo | 47 ++ .../tensor/implementations/tensor_bool.cairo | 7 + .../implementations/tensor_complex64.cairo | 9 + .../implementations/tensor_fp16x16.cairo | 7 + .../implementations/tensor_fp16x16wide.cairo | 7 + .../implementations/tensor_fp32x32.cairo | 7 + .../implementations/tensor_fp64x64.cairo | 7 + .../implementations/tensor_fp8x23.cairo | 7 + .../implementations/tensor_fp8x23wide.cairo | 7 + .../tensor/implementations/tensor_i32.cairo | 7 + .../tensor/implementations/tensor_i8.cairo | 7 + .../tensor/implementations/tensor_u32.cairo | 7 + src/operators/tensor/manipulation.cairo | 1 + .../tensor/manipulation/center_crop_pad.cairo | 357 ++++++++++ tests/nodes.cairo | 6 + tests/nodes/export_center_crop_pad_crop.cairo | 21 + .../export_center_crop_pad_crop/input_0.cairo | 616 ++++++++++++++++++ .../output_0.cairo | 226 +++++++ .../export_center_crop_pad_crop_and_pad.cairo | 21 + .../input_0.cairo | 497 ++++++++++++++ .../output_0.cairo | 317 +++++++++ ...export_center_crop_pad_crop_axes_chw.cairo | 21 + .../input_0.cairo | 497 ++++++++++++++ .../output_0.cairo | 287 ++++++++ ...export_center_crop_pad_crop_axes_hwc.cairo | 21 + .../input_0.cairo | 496 ++++++++++++++ .../output_0.cairo | 286 ++++++++ ...nter_crop_pad_crop_negative_axes_hwc.cairo | 21 + .../input_0.cairo | 496 ++++++++++++++ .../output_0.cairo | 287 ++++++++ tests/nodes/export_center_crop_pad_pad.cairo | 21 + .../export_center_crop_pad_pad/input_0.cairo | 226 +++++++ .../export_center_crop_pad_pad/output_0.cairo | 616 ++++++++++++++++++ 35 files changed, 5574 insertions(+) create mode 100644 docs/framework/operators/tensor/tensor.center_crop_pad.md create mode 100644 nodegen/node/center_crop_pad.py create mode 100644 
src/operators/tensor/manipulation/center_crop_pad.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop/input_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop/output_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_and_pad.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_and_pad/input_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_and_pad/output_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_axes_chw.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_axes_chw/input_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_axes_chw/output_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_axes_hwc.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_axes_hwc/input_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_axes_hwc/output_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_negative_axes_hwc.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/input_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/output_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_pad.cairo create mode 100644 tests/nodes/export_center_crop_pad_pad/input_0.cairo create mode 100644 tests/nodes/export_center_crop_pad_pad/output_0.cairo diff --git a/docs/framework/operators/tensor/tensor.center_crop_pad.md b/docs/framework/operators/tensor/tensor.center_crop_pad.md new file mode 100644 index 000000000..0c46caec6 --- /dev/null +++ b/docs/framework/operators/tensor/tensor.center_crop_pad.md @@ -0,0 +1,42 @@ +# tensor.center_crop_pad + +```rust +fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option>, zero: T +) -> Tensor +``` + +Center crop or pad an input to given dimensions. + +## Args + +* `self`(`@Tensor`) - Input to extract the centered crop from. 
+* `shape`(Tensor) - 1-D tensor representing the cropping window dimensions. +* `axes`(Option) - If provided, it specifies a subset of axes that ‘shape’ refer to. + +## Panics + +* Panics if axes is a negative number, axis+rank (self) is less than 0. + +## Returns + +Output data of tensors after crop/pad. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; +use core::option::OptionTrait; +fn center_crop_pad_example() -> Tensor { + let tensor: Tensor = TensorTrait::::new( + shape: array![5,4,1].span(), + data: array![ + 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16,17,18,19,20 + ].span(), + ); + // We can call `center_crop_pad` function as follows. + return tensor.center_crop_pad(TensorTrait::new(array![3].span(), array![5,2,1].span()), Option::None(())); +} +>>> [[2,3],[6,7],[10,11],[14,15],[18,19]] +``` diff --git a/nodegen/node/center_crop_pad.py b/nodegen/node/center_crop_pad.py new file mode 100644 index 000000000..d8b0432c1 --- /dev/null +++ b/nodegen/node/center_crop_pad.py @@ -0,0 +1,69 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl + + +class Center_crop_pad(RunAll): + @staticmethod + def export_center_crop_pad_crop(): + x = np.array(range(600), dtype=np.complex64).reshape((20,10,3)) + _x = Tensor(Dtype.COMPLEX64, x.shape, x.flatten()) + y = x[5:15, 1:8, :] + _y = Tensor(Dtype.COMPLEX64, y.shape, y.flatten()) + + name = "export_center_crop_pad_crop" + make_test([_x], _y, "input_0.center_crop_pad(TensorTrait::new(array![3].span(), array![10,7,3].span())), Option::None(()))", name) + + @staticmethod + def export_center_crop_pad_pad(): + x = np.array(range(210), dtype=np.complex64).reshape((10,7,3)) + _x = Tensor(Dtype.COMPLEX64, x.shape, x.flatten()) + y = np.zeros([20,10,3], dtype=np.complex64) + y[5:15, 1:8, :] = x + _y = Tensor(Dtype.COMPLEX64, y.shape, y.flatten()) + + name = 
"export_center_crop_pad_pad" + make_test([_x], _y, "input_0.center_crop_pad(TensorTrait::new(array![3].span(), array![20,10,3].span()), Option::None(()))", name) + + @staticmethod + def export_center_crop_pad_crop_and_pad(): + # x = np.random.randn(20, 8, 3).astype(np.complex64) + x = np.array(np.random.randn(20,8,3), dtype=np.complex64) + _x = Tensor(Dtype.COMPLEX64, x.shape, x.flatten()) + y = np.zeros([10,10,3], dtype=np.complex64) + y[:, 1:9, :] = x[5:15, :, :] + _y = Tensor(Dtype.COMPLEX64, y.shape, y.flatten()) + + name = "export_center_crop_pad_crop_and_pad" + make_test([_x], _y, "input_0.center_crop_pad(TensorTrait::new(array![3].span(), array![10,10,3].span()), Option::None(()))", name) + + @staticmethod + def export_center_crop_pad_crop_axes_hwc(): + x = np.array(np.random.randn(20,8,3), dtype=np.complex64) + _x = Tensor(Dtype.COMPLEX64, x.shape, x.flatten()) + y = np.zeros([10,9,3], dtype=np.complex64) + y[:, :8, :] = x[5:15, :, :] + _y = Tensor(Dtype.COMPLEX64, y.shape, y.flatten()) + + name = "export_center_crop_pad_crop_axes_hwc" + make_test([_x], _y, "input_0.center_crop_pad(TensorTrait::new(array![2].span(), array![10,9].span()), Option::Some(array![0,1]))", name) + + @staticmethod + def export_center_crop_pad_crop_negative_axes_hwc(): + x = np.array(np.random.randn(20,8,3), dtype=np.complex64) + _x = Tensor(Dtype.COMPLEX64, x.shape, x.flatten()) + y = np.zeros([10,9,3], dtype=np.complex64) + y[:, :8, :] = x[5:15, :, :] + _y = Tensor(Dtype.COMPLEX64, y.shape, y.flatten()) + name = "export_center_crop_pad_crop_negative_axes_hwc" + make_test([_x], _y, "input_0.center_crop_pad(TensorTrait::new(array![2].span(), array![10,9].span()), Option::Some(array![-3,-2]))", name) + + @staticmethod + def export_center_crop_pad_crop_axes_chw(): + x = np.array(np.random.randn(3,20,8), dtype=np.complex64) + _x = Tensor(Dtype.COMPLEX64, x.shape, x.flatten()) + y = np.zeros([3,10,9], dtype=np.complex64) + y[:, :, :8] = x[:, 5:15, :] + _y = Tensor(Dtype.COMPLEX64, 
y.shape, y.flatten()) + name = "export_center_crop_pad_crop_axes_chw" + make_test([_x], _y, "input_0.center_crop_pad(TensorTrait::new(array![2].span(), array![10,9].span()), Option::Some(array![1,2]))", name) \ No newline at end of file diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 222b0f423..475c1f4a8 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -128,6 +128,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde8Bit conversion of FP32 Input data. /// scatter_nd - The output of the operation is produced by creating a copy of the input data, and then updating its value to values specified by updates at specific index positions specified by indices. Its output shape is the same as the shape of data +/// center_crop_pad - Center crop or pad an input to given dimensions. trait TensorTrait { /// # tensor.new /// @@ -5665,6 +5666,52 @@ trait TensorTrait { fn random_uniform_like( tensor: @Tensor, high: Option, low: Option, seed: Option ) -> Tensor; + /// # tensor.center_crop_pad + /// + /// ```rust + /// fn center_crop_pad( + /// self: @Tensor, shape: Tensor, axes: Option>, zero: T + /// ) -> Tensor + /// ``` + /// + /// Center crop or pad an input to given dimensions. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - Input to extract the centered crop from. + /// * `shape`(Tensor) - 1-D tensor representing the cropping window dimensions. + /// * `axes`(Option) - If provided, it specifies a subset of axes that ‘shape’ refer to. + /// + /// ## Panics + /// + /// * Panics if axes is a negative number, axis+rank (self) is less than 0. + /// + /// ## Returns + /// + /// Output data of tensors after crop/pad. 
+ /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + /// use core::option::OptionTrait; + /// fn center_crop_pad_example() -> Tensor { + /// let tensor: Tensor = TensorTrait::::new( + /// shape: array![5,4,1].span(), + /// data: array![ + /// 1, 2, 3, 4, 5, 6, 7,8,9,10,11,12,13,14,15,16,17,18,19,20 + /// ].span(), + /// ); + /// // We can call `center_crop_pad` function as follows. + /// return tensor.center_crop_pad(TensorTrait::new(array![3].span(), array![5,2,1].span()), Option::None(())); + /// } + /// >>> [[2,3],[6,7],[10,11],[14,15],[18,19]] + /// ``` + /// + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor; } /// Cf: TensorTrait::new docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index e75405743..531ae605c 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -537,6 +537,13 @@ impl BoolTensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = false; + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements partial equal for two `Tensor` using the `PartialEq` trait. 
diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 84b3edb21..f3b4b38a5 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -574,6 +574,15 @@ impl Complex64Tensor of TensorTrait { ) -> Tensor { panic(array!['not supported!']) } + + fn center_crop_pad( + self: @Tensor, + shape: Tensor, + axes: Option> + ) -> Tensor { + let zero = ComplexTrait::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 05dd23ea2..27c877a6e 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -625,6 +625,13 @@ impl FP16x16Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = NumberTrait::::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 54aadbd3e..569996d86 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -580,6 +580,13 @@ impl FP16x16WTensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = NumberTrait::::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 7402cd761..4b24c111f 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -623,6 +623,13 @@ impl FP32x32Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = NumberTrait::::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 4477b0025..939b22ecd 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -624,6 +624,13 @@ impl FP64x64Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = NumberTrait::::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index c16b7feed..3717850b2 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -622,6 +622,13 @@ impl FP8x23Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = NumberTrait::::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 0e5ecc074..508313f89 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -561,6 +561,13 @@ impl FP8x23WTensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = NumberTrait::::zero(); + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 5e637d4ff..078449643 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -587,6 +587,13 @@ impl I32Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = 0_i32; + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index a5f9476c1..d08db2c26 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -591,6 +591,13 @@ impl I8Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = 0_i8; + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 00ab75b1d..9f243e942 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -534,6 +534,13 @@ impl U32Tensor of TensorTrait { ) -> Tensor { math::scatter_nd::scatter_nd(self, updates, indices, reduction) } + + fn center_crop_pad( + self: @Tensor, shape: Tensor, axes: Option> + ) -> Tensor { + let zero = 0_u32; + manipulation::center_crop_pad::center_crop_pad(self, shape, axes, zero) + } } /// Implements addition for `Tensor` using the `Add` trait. 
diff --git a/src/operators/tensor/manipulation.cairo b/src/operators/tensor/manipulation.cairo index 057e5afad..18edfc105 100644 --- a/src/operators/tensor/manipulation.cairo +++ b/src/operators/tensor/manipulation.cairo @@ -3,3 +3,4 @@ mod split; mod split_to_sequence; mod reverse_sequence; mod optional; +mod center_crop_pad; \ No newline at end of file diff --git a/src/operators/tensor/manipulation/center_crop_pad.cairo b/src/operators/tensor/manipulation/center_crop_pad.cairo new file mode 100644 index 000000000..a47818f45 --- /dev/null +++ b/src/operators/tensor/manipulation/center_crop_pad.cairo @@ -0,0 +1,357 @@ +use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use core::option::OptionTrait; +use orion::numbers::NumberTrait; +use alexandria_data_structures::array_ext::{ArrayTraitExt, SpanTraitExt}; + +/// Cf: Tensor::center_crop_pad docstring +fn center_crop_pad< + T, + +Copy, + +Drop, + +TensorTrait, +>( + self: @Tensor, shape: Tensor, axes: Option>, zero: T +) -> Tensor { + let input_rank: usize = (*self.shape).len(); + let mut axes: Array = match axes { + Option::Some(mut value) => { + let mut axes: Array = ArrayTrait::new(); + loop { + match value.pop_front() { + Option::Some(axis) => { + if axis >= 0 { + axes.append(axis.try_into().unwrap()); + } else { + let mut input_rank: i64 = input_rank.into(); + assert!(axis + input_rank >= 0, "shape cannot be less than 0"); + axes.append((axis + input_rank).try_into().unwrap()); + } + }, + Option::None => { break (); }, + }; + }; + axes + }, + Option::None => { + let mut axes: Array = ArrayTrait::new(); + let mut i: usize = 0; + loop { + if i > input_rank - 1 { + break (); + } + axes.append(i); + i += 1; + }; + axes + } + }; + + let mut pad_slices: Array> = ArrayTrait::new(); + let mut crop_slices: Array> = ArrayTrait::new(); + let mut self_shape_copy = (*self.shape).clone(); + loop { + match self_shape_copy.pop_front() { + Option::Some(dim) => { + let mut 
temp: Array = ArrayTrait::new(); + let mut i: usize = 0; + loop { + if i > *dim - 1 { + break (); + } + temp.append(i); + i += 1; + }; + pad_slices.append(temp.clone()); + crop_slices.append(temp.clone()); + }, + Option::None(_) => { break (); } + }; + }; + + let mut new_shape: Array = ArrayTrait::new(); + let mut self_shape_copy = (*self.shape).clone(); + loop { + match self_shape_copy.pop_front() { + Option::Some(dim) => { + new_shape.append(*dim); + }, + Option::None(_) => { break (); } + }; + }; + + let mut i: usize = 0; + loop { + + let mut a: usize = match axes.pop_front() { + Option::Some(axes) => axes.try_into().unwrap(), + Option::None(_) => { break (); } + }; + + let mut sh: usize = match shape.data.get(i) { + Option::Some(sh) => { + let res: usize = (*sh.unbox()).try_into().unwrap(); + res + }, + Option::None(_) => { break (); } + }; + + let mut dim: usize = (*(*self.shape).at(a)); + if sh == a { + continue; + } else if sh < dim { + usize_cover(ref new_shape, a, sh); + let mut d = dim - sh; + let mut sl:Array = ArrayTrait::new(); + if d % 2 == 0 { + d /= 2; + sl = slice(d, dim - d); + } else { + d /= 2; + sl = slice(d, dim - d - 1); + } + array_cover(ref crop_slices, a, sl); + } else { + // sh > dim + usize_cover(ref new_shape, a, sh); + let mut d = sh - dim; + let mut sl:Array = ArrayTrait::new(); + if d % 2 == 0 { + d /= 2; + sl = slice(d, sh - d); + } else { + d /= 2; + sl = slice(d, sh - d - 1); + } + array_cover(ref pad_slices, a, sl); + }; + i += 1; + }; + + let mut cropped = tensor_crop(self, crop_slices); + let result = tensor_pad(cropped, pad_slices, new_shape, zero); + result +} + +fn tensor_pad< + T, + +Copy, + +Drop, + +TensorTrait, +>(input_data: Tensor, mut pad_slices: Array>, shape: Array, zero: T) -> Tensor { + let mut count: usize = 1; + let mut res: Span = input_data.data; + let mut shape_copy = shape.clone(); + let mut i: usize = input_data.shape.len().into() - 1; + loop { + let mut shape_i = shape_copy.at(i); + let mut 
input_data_shape_i = input_data.shape.at(i); + let mut slice = pad_slices.at(i); + let mut slice_len = slice.len(); + if slice_len > *shape_i { + slice_len = *shape_i; + } + if i == 0 { + if shape_i != input_data_shape_i { + let mut temp = res; + res = ArrayTrait::::new().span(); + res = make_zero_array(*slice.at(0) * count, zero).concat( + temp + ).concat( + make_zero_array((*shape_i - *slice.at(slice_len - 1) - 1) * count, zero) + ); + } + break (); + } + if shape_i != input_data_shape_i { + let mut arr_list: Array> = make_array_from_dim(res, count * *input_data_shape_i); + res = ArrayTrait::::new().span(); + loop { + match arr_list.pop_front() { + Option::Some(mut arr) => { + res = res.concat(make_zero_array(*slice.at(0) * count, zero)); + res = res.concat(arr.span()); + res = res.concat(make_zero_array((*shape_i - *slice.at(slice_len - 1) - 1) * count, zero)); + }, + Option::None(_) => { break (); } + }; + }; + } + count *= *shape_i; + i -= 1; + }; + TensorTrait::::new(shape.span(), res) +} + +fn tensor_crop< + T, + +Copy, + +Drop, + +TensorTrait, +>(input_data: @Tensor, mut crop_slices: Array>) -> Tensor { + let mut input_data_shape_copy: Span = *input_data.shape; + let mut count = 1; + let mut shape: Array = ArrayTrait::new(); + let mut i: usize = 0; + loop { + if i > input_data_shape_copy.len() - 1 { + break (); + } + shape.append(*input_data_shape_copy.at(i)); + i += 1; + }; + + let mut res: Span = *input_data.data; + let mut i: usize = input_data_shape_copy.len() - 1; + loop { + let mut dim = (*input_data_shape_copy.at(i)); + let mut slice = crop_slices.at(i); + let slice_len: usize = slice.len(); + if i == 0 { + if dim != slice_len { + usize_cover(ref shape, i, slice_len); + let mut arr_list: Array> = make_array_from_dim(res, count); + res = ArrayTrait::::new().span(); + let mut j: usize = 0; + loop { + if j > slice_len - 1 { + break (); + } + res = res.concat(arr_list.at(*slice.at(j)).span()); + j += 1; + } + } + break (); + } + + if dim != slice_len { + 
usize_cover(ref shape, i, slice_len); + let mut arr_list: Array> = make_array_from_dim(res, count * dim); + res = ArrayTrait::::new().span(); + loop { + match arr_list.pop_front() { + Option::Some(mut arr) => { + let mut arr = make_array_from_dim(arr.span(), count); + let mut j: usize = 0; + loop { + if j > slice_len - 1 { + break (); + } + res = res.concat(arr.at(*slice.at(j)).span()); + j += 1; + }; + }, + Option::None(_) => { break(); } + }; + }; + } + count *= slice_len; + i -= 1; + }; + TensorTrait::new(shape.span(), res) +} + +fn make_zero_array< + T, + +Drop, + +Copy +>(size: usize, zero: T) -> Span { + let mut res: Array = ArrayTrait::new(); + if size == 0 { + return res.span(); + } + let mut i: usize = 0; + loop { + if i > size - 1 { + break (); + } + res.append(zero.clone()); + i += 1; + }; + res.span() +} + +fn slice(start: usize, end: usize) -> Array { + let mut index: Array = ArrayTrait::new(); + let mut i: usize = start; + loop { + if i > end - 1 { + break; + } + index.append(i); + i += 1; + }; + index +} + +fn array_cover(ref arr: Array>, index: usize, data: Array) { + + if arr.is_empty() { + arr.append(data); + return (); + } + + let mut arr_len: usize = arr.len(); + let mut i: usize = 0; + loop { + if i > arr_len - 1 { + break (); + } + let temp = arr.pop_front().unwrap(); + if i == index { + arr.append(data.clone()); + } else { + arr.append(temp); + } + i += 1; + }; +} + +fn usize_cover(ref arr: Array, index: usize, data: usize) { + + if arr.is_empty() { + arr.append(data); + return (); + } + + let mut arr_len: usize = arr.len(); + let mut i: usize = 0; + loop { + if i > arr_len - 1 { + break (); + } + + let temp = arr.pop_front().unwrap(); + if i == index { + arr.append(data.clone()); + } else { + arr.append(temp); + } + i += 1; + }; +} + +fn make_array_from_dim, +Copy>(input_data: Span, dim: usize) -> Array> { + let row: usize = input_data.len() / dim; + let data_copy: Span = input_data.clone(); + + let mut res = ArrayTrait::>::new(); + let mut 
i: usize = 0; + loop { + if i > row - 1 { + break (); + } + let mut temp: Array = ArrayTrait::new(); + let mut j: usize = 0; + loop { + if j > dim - 1 { + break (); + } + temp.append((*data_copy.at(i * dim + j))); + j += 1; + }; + res.append(temp); + i += 1; + }; + res +} \ No newline at end of file diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 8814cfb80..dea5e625e 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1039,3 +1039,9 @@ mod conv_2D_with_autopad_same; mod conv_2D_with_strides_asymmetric_padding; mod conv_2D_with_strides_with_padding; mod conv_4D_with_padding; +mod export_center_crop_pad_crop; +mod export_center_crop_pad_pad; +mod export_center_crop_pad_crop_axes_hwc; +mod export_center_crop_pad_crop_and_pad; +mod export_center_crop_pad_crop_axes_chw; +mod export_center_crop_pad_crop_negative_axes_hwc; diff --git a/tests/nodes/export_center_crop_pad_crop.cairo b/tests/nodes/export_center_crop_pad_crop.cairo new file mode 100644 index 000000000..b2c58a6c8 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::Complex64Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_export_center_crop_pad_crop() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.center_crop_pad(TensorTrait::new(array![3].span(), array![10,7,3].span()), Option::None(())); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/export_center_crop_pad_crop/input_0.cairo b/tests/nodes/export_center_crop_pad_crop/input_0.cairo new file mode 100644 index 000000000..39bef3eb6 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop/input_0.cairo @@ -0,0 +1,616 @@ +use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(20); + shape.append(10); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 4, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 5, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 6, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 7, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 8, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 9, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 10, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 11, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 12, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 13, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 14, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 15, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 16, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 17, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 18, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 19, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 20, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 21, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 22, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 23, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 24, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 25, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 26, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 27, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 28, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 29, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 30, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 31, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 32, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 33, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 34, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 35, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 36, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 37, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 38, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 39, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 40, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 41, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 42, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 43, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 44, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 45, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 46, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 47, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 48, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 49, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 50, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 51, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 52, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 53, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 54, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 55, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 56, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 57, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 58, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 59, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 60, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 61, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 62, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 63, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 64, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 65, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 66, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 67, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 68, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 69, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 70, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 71, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 72, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 73, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 74, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 75, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 76, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 77, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 78, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 79, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 80, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 81, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 82, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 83, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 84, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 85, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 86, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 87, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 88, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 89, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 90, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 91, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 92, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 93, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 94, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 95, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 96, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 97, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 98, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 99, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 100, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 101, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 102, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 103, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 104, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 105, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 106, sign: false } , img: FP64x64 { mag: 0, 
sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 107, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 108, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 109, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 110, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 111, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 112, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 113, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 114, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 115, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 116, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 117, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 118, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 119, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 120, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 121, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 122, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 123, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 124, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 125, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 126, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 127, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 128, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 129, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 130, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 131, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 132, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 133, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 134, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 135, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 136, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 137, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 138, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 139, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 140, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 141, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 142, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 143, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 144, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 145, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 146, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 147, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 148, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 149, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 150, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 151, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 152, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 153, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 154, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 155, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 156, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 157, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 158, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 159, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 160, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 161, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 162, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 163, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 164, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 165, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 166, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 167, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 168, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 169, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 170, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 171, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 172, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 173, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 174, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 175, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 176, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 177, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 178, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 179, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 180, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 181, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 182, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 183, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 184, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 185, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 186, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 187, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 188, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 189, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 190, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 191, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 192, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 193, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 194, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 195, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 196, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 197, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 198, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 199, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 200, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 201, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 202, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 203, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 204, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 205, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 206, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 207, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 208, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 209, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 210, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 211, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 212, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 213, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 214, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 215, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 216, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 217, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 218, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 219, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 220, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 221, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 222, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 223, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 224, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 225, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 226, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 227, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 228, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 229, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 230, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 231, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 232, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 233, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 234, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 235, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 236, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 237, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 238, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 239, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 240, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 241, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 242, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 243, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 244, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 245, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 246, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 247, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 248, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 249, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 250, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 251, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 252, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 253, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 254, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 255, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 256, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 257, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 258, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 259, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 260, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 261, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 262, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 263, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 264, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 265, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 266, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 267, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 268, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 269, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 270, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 271, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 272, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 273, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 274, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 275, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 276, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 277, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 278, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 279, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 280, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 281, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 282, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 283, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 284, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 285, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 286, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 287, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 288, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 289, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 290, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 291, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 292, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 293, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 294, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 295, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 296, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 297, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 298, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 299, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 300, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 301, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 302, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 303, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 304, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 305, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 306, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 307, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 308, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 309, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 310, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 311, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 312, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 313, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 314, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 315, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 316, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 317, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 318, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 319, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 320, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 321, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 322, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 323, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 324, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 325, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 326, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 327, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 328, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 329, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 330, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 331, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 332, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 333, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 334, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 335, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 336, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 337, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 338, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 339, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 340, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 341, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 342, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 343, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 344, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 345, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 346, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 347, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 348, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 349, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 350, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 351, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 352, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 353, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 354, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 355, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 356, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 357, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 358, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 359, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 360, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 361, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 362, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 363, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 364, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 365, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 366, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 367, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 368, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 369, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 370, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 371, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 372, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 373, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 374, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 375, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 376, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 377, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 378, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 379, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 380, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 381, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 382, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 383, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 384, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 385, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 386, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 387, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 388, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 389, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 390, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 391, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 392, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 393, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 394, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 395, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 396, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 397, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 398, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 399, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 400, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 401, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 402, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 403, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 404, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 405, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 406, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 407, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 408, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 409, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 410, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 411, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 412, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 413, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 414, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 415, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 416, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 417, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 418, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 419, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 420, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 421, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 422, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 423, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 424, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 425, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 426, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 427, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 428, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 429, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 430, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 431, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 432, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 433, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 434, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 435, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 436, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 437, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 438, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 439, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 440, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 441, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 442, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 443, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 444, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 445, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 446, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 447, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 448, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 449, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 450, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 451, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 452, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 453, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 454, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 455, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 456, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 457, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 458, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 459, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 460, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 461, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 462, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 463, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 464, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 465, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 466, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 467, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 468, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 469, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 470, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 471, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 472, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 473, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 474, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 475, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 476, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 477, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 478, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 479, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 480, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 481, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 482, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 483, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 484, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 485, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 486, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 487, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 488, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 489, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 490, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 491, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 492, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 493, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 494, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 495, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 496, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 497, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 498, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 499, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 500, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 501, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 502, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 503, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 504, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 505, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 506, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 507, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 508, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 509, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 510, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 511, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 512, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 513, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 514, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 515, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 516, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 517, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 518, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 519, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 520, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 521, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 522, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 523, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 524, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 525, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 526, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 527, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 528, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 529, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 530, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 531, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 532, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 533, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 534, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 535, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 536, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 537, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 538, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 539, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 540, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 541, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 542, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 543, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 544, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 545, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 546, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 547, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 548, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 549, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 550, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 551, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 552, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 553, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 554, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 555, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 556, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 557, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 558, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 559, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 560, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 561, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 562, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 563, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 564, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 565, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 566, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 567, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 568, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 569, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 570, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 571, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 572, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 573, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 574, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 575, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 576, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 577, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 578, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 579, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 580, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 581, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 582, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 
{ mag: 583, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 584, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 585, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 586, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 587, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 588, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 589, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 590, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 591, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 592, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 593, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 594, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 595, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 596, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 597, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 598, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 599, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop/output_0.cairo b/tests/nodes/export_center_crop_pad_crop/output_0.cairo new file 
mode 100644 index 000000000..74def2b6f --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop/output_0.cairo @@ -0,0 +1,226 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + shape.append(7); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 153, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 154, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 155, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 156, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 157, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 158, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 159, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 160, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 161, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 162, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 163, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 164, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 165, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 166, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 167, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 168, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 169, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 170, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 171, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 172, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 173, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 183, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 184, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 185, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 186, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 187, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 188, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 189, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 190, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 191, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 192, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 193, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 194, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 195, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 196, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 197, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 198, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 199, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 200, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 201, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 202, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 203, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 213, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 214, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 215, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 216, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 217, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 218, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 219, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 220, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 221, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 222, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 223, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 224, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 225, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 226, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 227, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 228, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 229, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 230, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 231, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 232, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 233, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 243, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 244, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 245, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 246, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 247, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 248, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 249, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 250, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 251, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 252, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 253, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 254, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 255, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 256, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 257, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 258, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 259, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 260, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 261, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 262, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 263, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 273, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 274, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 275, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 276, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 277, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 278, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 279, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 280, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 281, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 282, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 283, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 284, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 285, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 286, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 287, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 288, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 289, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 290, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 291, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 292, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 293, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 303, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 304, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 305, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 306, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 307, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 308, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 309, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 310, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 311, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 312, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 313, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 314, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 315, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 316, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 317, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 318, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 319, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 320, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 321, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 322, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 323, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 333, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 334, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 335, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 336, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 337, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 338, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 339, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 340, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 341, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 342, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 343, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 344, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 345, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 346, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 347, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 348, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 349, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 350, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 351, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 352, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 353, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 363, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 364, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 365, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 366, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 367, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 368, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 369, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 370, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 371, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 372, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 373, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 374, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 375, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 376, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 377, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 378, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 379, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 380, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 381, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 382, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 383, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 393, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 394, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 395, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 396, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 397, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 398, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 399, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 400, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 401, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 402, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 403, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 404, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 405, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 406, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 407, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 408, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 409, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 410, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 411, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 412, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 413, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 423, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 424, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 425, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 426, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 427, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 428, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 429, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 430, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 431, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 432, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 433, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 434, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 435, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 436, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 437, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 438, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 439, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 440, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 441, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 442, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 443, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_and_pad.cairo b/tests/nodes/export_center_crop_pad_crop_and_pad.cairo new file mode 100644 index 000000000..aba03947f --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_and_pad.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::Complex64Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::Complex64TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_export_center_crop_pad_crop_and_pad() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.center_crop_pad(TensorTrait::new(array![3].span(), array![10,10,3].span()), Option::None(())); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/export_center_crop_pad_crop_and_pad/input_0.cairo b/tests/nodes/export_center_crop_pad_crop_and_pad/input_0.cairo new file mode 100644 index 000000000..eaca95f70 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_and_pad/input_0.cairo @@ -0,0 +1,497 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(20); + shape.append(8); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 
0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); 
+ data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, 
sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); 
+ data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } 
, img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, 
sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } 
, img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } 
, img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_and_pad/output_0.cairo b/tests/nodes/export_center_crop_pad_crop_and_pad/output_0.cairo new file mode 100644 index 000000000..87ca4d987 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_and_pad/output_0.cairo @@ -0,0 +1,317 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + shape.append(10); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 
{ mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 
{ mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 
{ mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_axes_chw.cairo b/tests/nodes/export_center_crop_pad_crop_axes_chw.cairo new file mode 100644 index 000000000..4ebff18f3 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_axes_chw.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::Complex64Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_export_center_crop_pad_crop_axes_chw() { + let input_0 = input_0::input_0(); + let z_0 = 
output_0::output_0(); + + let y_0 = input_0.center_crop_pad(TensorTrait::new(array![2].span(), array![10,9].span()), Option::Some(array![1,2])); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/export_center_crop_pad_crop_axes_chw/input_0.cairo b/tests/nodes/export_center_crop_pad_crop_axes_chw/input_0.cairo new file mode 100644 index 000000000..81c18ab9b --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_axes_chw/input_0.cairo @@ -0,0 +1,497 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(20); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } 
, img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_axes_chw/output_0.cairo b/tests/nodes/export_center_crop_pad_crop_axes_chw/output_0.cairo new file mode 100644 index 000000000..3bbab318c --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_axes_chw/output_0.cairo @@ -0,0 +1,287 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(10); + shape.append(9); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, 
sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_axes_hwc.cairo b/tests/nodes/export_center_crop_pad_crop_axes_hwc.cairo new file mode 100644 index 
000000000..57614ffc3 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_axes_hwc.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::Complex64Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_export_center_crop_pad_crop_axes_hwc() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.center_crop_pad(TensorTrait::new(array![2].span(), array![10,9].span()), Option::Some(array![0,1])); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/export_center_crop_pad_crop_axes_hwc/input_0.cairo b/tests/nodes/export_center_crop_pad_crop_axes_hwc/input_0.cairo new file mode 100644 index 000000000..921a724bc --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_axes_hwc/input_0.cairo @@ -0,0 +1,496 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(20); + shape.append(8); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); 
+ data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, 
sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } 
, img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_axes_hwc/output_0.cairo b/tests/nodes/export_center_crop_pad_crop_axes_hwc/output_0.cairo new file mode 100644 index 000000000..2cb7243c2 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_axes_hwc/output_0.cairo @@ -0,0 +1,286 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + shape.append(9); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 
0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc.cairo b/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc.cairo new file mode 100644 index 000000000..470700a6e --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::Complex64Tensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_export_center_crop_pad_crop_negative_axes_hwc() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.center_crop_pad(TensorTrait::new(array![2].span(), array![10,9].span()), Option::Some(array![-3,-2])); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/input_0.cairo b/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/input_0.cairo new file mode 100644 index 000000000..1135d5849 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/input_0.cairo @@ -0,0 +1,496 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn input_0() -> Tensor { + let mut shape = 
ArrayTrait::::new(); + shape.append(20); + shape.append(8); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); 
+ data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 
{ mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 
0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); 
+ data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/output_0.cairo b/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/output_0.cairo new file mode 100644 index 000000000..e8147ab2f --- /dev/null +++ b/tests/nodes/export_center_crop_pad_crop_negative_axes_hwc/output_0.cairo @@ -0,0 +1,287 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + + +fn 
output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + shape.append(9); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 
{ real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: 
FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { 
mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { 
mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: true } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_pad.cairo b/tests/nodes/export_center_crop_pad_pad.cairo new file mode 100644 index 000000000..0345cd023 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_pad.cairo @@ -0,0 +1,21 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::Complex64Tensor; +use orion::operators::tensor::Complex64TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_export_center_crop_pad_pad() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.center_crop_pad(TensorTrait::new(array![3].span(), array![20,10,3].span()), Option::None(())); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/export_center_crop_pad_pad/input_0.cairo 
b/tests/nodes/export_center_crop_pad_pad/input_0.cairo new file mode 100644 index 000000000..3f91bf544 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_pad/input_0.cairo @@ -0,0 +1,226 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + shape.append(7); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 4, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 5, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 6, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 7, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 8, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 9, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 10, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 11, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 12, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 13, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 14, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 15, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 16, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 17, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 18, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 19, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 20, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 21, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 22, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 23, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 24, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 25, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 26, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 27, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 28, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 29, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 30, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 31, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 32, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 33, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 34, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 35, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 36, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 37, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 38, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 39, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 40, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 41, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 42, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 43, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 44, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 45, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 46, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 47, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 48, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 49, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 50, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 51, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 52, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 53, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 54, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 55, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 56, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 57, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 58, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 59, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 60, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 61, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 62, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 63, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 64, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 65, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 66, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 67, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 68, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 69, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 70, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 71, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 72, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 73, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 74, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 75, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 76, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 77, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 78, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 79, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 80, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 81, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 82, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 83, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 84, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 85, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 86, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 87, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 88, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 89, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 90, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 91, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 92, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 93, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 94, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 95, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 96, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 97, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 98, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 99, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 100, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 101, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 102, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 103, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 104, sign: false } , img: FP64x64 { mag: 0, sign: false } }); 
+ data.append(complex64 { real: FP64x64 { mag: 105, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 106, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 107, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 108, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 109, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 110, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 111, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 112, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 113, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 114, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 115, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 116, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 117, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 118, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 119, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 120, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 121, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 122, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 123, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 124, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 125, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 126, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 127, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 128, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 129, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 130, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 131, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 132, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 133, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 134, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 135, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 136, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 137, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 138, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 139, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 140, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 141, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 142, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 143, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 144, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 145, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 146, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 147, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 148, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 149, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 150, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 151, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 152, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 153, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 154, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 155, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 156, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 157, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 158, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 159, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 160, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 161, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 162, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 163, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 164, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 165, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 166, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 167, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 168, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 169, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 170, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 171, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 172, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 173, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 174, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 175, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 176, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 177, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 178, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 179, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 180, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 181, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 182, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 183, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 184, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 185, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 186, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 187, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 188, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 189, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 190, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 191, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 192, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 193, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 194, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 195, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 196, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 197, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 198, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 199, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 200, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 201, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 202, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 203, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 204, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 205, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 206, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 207, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 208, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 209, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/export_center_crop_pad_pad/output_0.cairo b/tests/nodes/export_center_crop_pad_pad/output_0.cairo new file mode 100644 index 000000000..6ec454d46 --- /dev/null +++ b/tests/nodes/export_center_crop_pad_pad/output_0.cairo @@ -0,0 +1,616 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::Complex64Tensor; +use orion::numbers::{NumberTrait, complex64}; +use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::{FixedTrait, FP64x64}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(20); + shape.append(10); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { 
real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: 
false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 1, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 2, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 3, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 4, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 5, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 6, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 7, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 8, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 9, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 10, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 11, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 12, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 13, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 14, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 15, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 16, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 17, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 18, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 19, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 20, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 21, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 22, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 23, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 24, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 25, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 26, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 27, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 28, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 29, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 30, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 31, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 32, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 33, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 34, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 35, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 36, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 37, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 38, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 39, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 40, sign: 
false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 41, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 42, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 43, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 44, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 45, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 46, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 47, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 48, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 49, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 50, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 51, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 52, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 53, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 54, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 55, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 56, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 57, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 58, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 59, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 60, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 61, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 62, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false 
} , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 63, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 64, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 65, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 66, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 67, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 68, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 69, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 70, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 71, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 72, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 73, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 74, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 75, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 76, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 77, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 78, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 79, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 80, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 81, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 82, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 83, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 84, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 85, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 86, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 87, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 88, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 89, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 90, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 91, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 92, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 93, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 94, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 95, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 96, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 97, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 98, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 99, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 100, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 101, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 102, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 103, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 104, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 105, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 106, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 107, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 108, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 109, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 110, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 111, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 112, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 113, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 114, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 115, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 116, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 117, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 118, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 119, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 120, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 121, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 122, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 123, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 124, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 125, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 126, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 127, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 128, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 129, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 130, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 131, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 132, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 133, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 134, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 135, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 136, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 137, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 138, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 139, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 140, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 141, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 142, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 143, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 144, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 145, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 146, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 147, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 148, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 149, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 150, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 151, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 152, sign: false } , img: FP64x64 { mag: 
0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 153, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 154, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 155, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 156, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 157, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 158, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 159, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 160, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 161, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 162, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 163, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 164, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 165, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 166, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 167, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + 
data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 168, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 169, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 170, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 171, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 172, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 173, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 174, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 175, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 176, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 177, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 178, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 179, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 180, 
sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 181, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 182, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 183, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 184, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 185, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 186, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 187, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 188, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 189, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 190, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 191, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 192, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 193, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 194, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 195, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 196, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 197, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 198, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 199, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 200, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 201, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 202, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 203, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 204, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 205, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 206, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 207, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 208, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 209, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false 
} }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: 
FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } 
}); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , 
img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + data.append(complex64 { real: FP64x64 { mag: 0, sign: false } , img: FP64x64 { mag: 0, sign: false } }); + TensorTrait::new(shape.span(), data.span()) +} From 9e9729532869ed5d434387db075fa4f4afc6bca2 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Sat, 24 Feb 2024 10:44:37 +0100 Subject: [PATCH 02/68] feat: max pool --- .../linear_classifier.predict.md | 4 +- .../linear_regressor.predict.md | 8 +- .../tree_ensemble_classifier.predict.md | 4 +- .../tree_ensemble_regressor.predict.md | 4 +- .../operators/neural-network/nn.col2im.md | 1 - .../operators/neural-network/nn.max_pool.md | 107 ++ nodegen/node/max_pool.py | 1263 +++++++++++++++++ .../tree_ensemble_classifier.cairo | 8 +- .../tree_ensemble_regressor.cairo | 12 +- src/operators/nn.cairo | 3 + src/operators/nn/common.cairo | 14 + src/operators/nn/core.cairo | 121 ++ src/operators/nn/functional.cairo | 2 + src/operators/nn/functional/common_pool.cairo | 945 ++++++++++++ src/operators/nn/functional/gemm.cairo | 2 +- src/operators/nn/functional/max_pool.cairo | 1084 ++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 27 + .../nn/implementations/nn_fp32x32.cairo | 18 + .../nn/implementations/nn_fp64x64.cairo | 18 + 
.../nn/implementations/nn_fp8x23.cairo | 27 + src/operators/nn/implementations/nn_i32.cairo | 15 + src/operators/nn/implementations/nn_i8.cairo | 16 + src/operators/nn/implementations/nn_u32.cairo | 16 + tests/lib.cairo | 2 + tests/nodes.cairo | 13 + tests/nodes/maxpool_1d.cairo | 30 + tests/nodes/maxpool_1d/input_0.cairo | 110 ++ tests/nodes/maxpool_1d/output_0.cairo | 62 + tests/nodes/maxpool_1d_default.cairo | 31 + tests/nodes/maxpool_1d_default/input_0.cairo | 110 ++ tests/nodes/maxpool_1d_default/output_0.cairo | 107 ++ tests/nodes/maxpool_2d.cairo | 30 + tests/nodes/maxpool_2d/input_0.cairo | 40 + tests/nodes/maxpool_2d/output_0.cairo | 19 + tests/nodes/maxpool_2d_ceil.cairo | 30 + tests/nodes/maxpool_2d_ceil/input_0.cairo | 31 + tests/nodes/maxpool_2d_ceil/output_0.cairo | 19 + tests/nodes/maxpool_2d_constraint_index.cairo | 25 + .../maxpool_2d_constraint_index/input_0.cairo | 40 + .../output_0.cairo | 19 + tests/nodes/maxpool_2d_default.cairo | 30 + tests/nodes/maxpool_2d_default/input_0.cairo | 207 +++ tests/nodes/maxpool_2d_default/output_0.cairo | 162 +++ tests/nodes/maxpool_2d_dilations.cairo | 30 + .../nodes/maxpool_2d_dilations/input_0.cairo | 31 + .../nodes/maxpool_2d_dilations/output_0.cairo | 19 + tests/nodes/maxpool_2d_pads_default.cairo | 30 + .../maxpool_2d_pads_default/input_0.cairo | 40 + .../maxpool_2d_pads_default/output_0.cairo | 40 + .../nodes/maxpool_2d_same_lower_default.cairo | 31 + .../input_0.cairo | 207 +++ .../output_0.cairo | 207 +++ tests/nodes/maxpool_2d_same_upper.cairo | 31 + .../nodes/maxpool_2d_same_upper/input_0.cairo | 40 + .../maxpool_2d_same_upper/output_0.cairo | 24 + .../nodes/maxpool_2d_same_upper_default.cairo | 31 + .../input_0.cairo | 207 +++ .../output_0.cairo | 207 +++ tests/nodes/maxpool_3d_dilations.cairo | 30 + .../nodes/maxpool_3d_dilations/input_0.cairo | 80 ++ .../nodes/maxpool_3d_dilations/output_0.cairo | 24 + tests/nodes/maxpool_4d_dilations.cairo | 30 + .../nodes/maxpool_4d_dilations/input_0.cairo | 785 
++++++++++ .../nodes/maxpool_4d_dilations/output_0.cairo | 65 + 64 files changed, 7008 insertions(+), 17 deletions(-) create mode 100644 docs/framework/operators/neural-network/nn.max_pool.md create mode 100644 nodegen/node/max_pool.py create mode 100644 src/operators/nn/common.cairo create mode 100644 src/operators/nn/functional/common_pool.cairo create mode 100644 src/operators/nn/functional/max_pool.cairo create mode 100644 tests/nodes/maxpool_1d.cairo create mode 100644 tests/nodes/maxpool_1d/input_0.cairo create mode 100644 tests/nodes/maxpool_1d/output_0.cairo create mode 100644 tests/nodes/maxpool_1d_default.cairo create mode 100644 tests/nodes/maxpool_1d_default/input_0.cairo create mode 100644 tests/nodes/maxpool_1d_default/output_0.cairo create mode 100644 tests/nodes/maxpool_2d.cairo create mode 100644 tests/nodes/maxpool_2d/input_0.cairo create mode 100644 tests/nodes/maxpool_2d/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_ceil.cairo create mode 100644 tests/nodes/maxpool_2d_ceil/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_ceil/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_constraint_index.cairo create mode 100644 tests/nodes/maxpool_2d_constraint_index/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_constraint_index/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_default.cairo create mode 100644 tests/nodes/maxpool_2d_default/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_default/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_dilations.cairo create mode 100644 tests/nodes/maxpool_2d_dilations/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_dilations/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_pads_default.cairo create mode 100644 tests/nodes/maxpool_2d_pads_default/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_pads_default/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_same_lower_default.cairo create mode 100644 
tests/nodes/maxpool_2d_same_lower_default/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_same_lower_default/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_same_upper.cairo create mode 100644 tests/nodes/maxpool_2d_same_upper/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_same_upper/output_0.cairo create mode 100644 tests/nodes/maxpool_2d_same_upper_default.cairo create mode 100644 tests/nodes/maxpool_2d_same_upper_default/input_0.cairo create mode 100644 tests/nodes/maxpool_2d_same_upper_default/output_0.cairo create mode 100644 tests/nodes/maxpool_3d_dilations.cairo create mode 100644 tests/nodes/maxpool_3d_dilations/input_0.cairo create mode 100644 tests/nodes/maxpool_3d_dilations/output_0.cairo create mode 100644 tests/nodes/maxpool_4d_dilations.cairo create mode 100644 tests/nodes/maxpool_4d_dilations/input_0.cairo create mode 100644 tests/nodes/maxpool_4d_dilations/output_0.cairo diff --git a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md index aec154f68..7ed30f236 100644 --- a/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md +++ b/docs/framework/operators/machine-learning/linear-classifier/linear_classifier.predict.md @@ -1,7 +1,7 @@ # LinearClassifierTrait::predict ```rust - fn predict(ref self: LinearClassifier, X: Tensor) -> Tensor; + fn predict(classifier: LinearClassifier, X: Tensor) -> Tensor; ``` Linear Classifier. Performs the linear classification. 
@@ -85,7 +85,7 @@ fn linear_classifier_helper( fn linear_classifier_multi_softmax() -> (Span, Tensor) { let (mut classifier, X) = linear_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, mut scores) = LinearClassifierTrait::predict(ref classifier, X); + let (labels, mut scores) = LinearClassifierTrait::predict(classifier, X); (labels, scores) } diff --git a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md index f1bd38831..6c40ac930 100644 --- a/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md +++ b/docs/framework/operators/machine-learning/linear-regressor/linear_regressor.predict.md @@ -1,14 +1,14 @@ # LinearRegressorTrait::predict ```rust - fn predict(ref self: LinearRegressor, X: Tensor) -> Tensor; + fn predict(regressor: LinearRegressor, X: Tensor) -> Tensor; ``` Linear Regressor. Performs the generalized linear regression evaluation. ## Args -* `self`: LinearRegressor - A LinearRegressor object. +* `regressor`: LinearRegressor - A LinearRegressor object. * `X`: Input 2D tensor. 
## Returns @@ -68,7 +68,7 @@ fn example_linear_regressor() -> Tensor { post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); scores } @@ -120,7 +120,7 @@ fn example_linear_regressor_2() -> Tensor { post_transform }; - let scores = LinearRegressorTrait::predict(ref regressor, X); + let scores = LinearRegressorTrait::predict(regressor, X); scores } diff --git a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md index 6d839e873..c38f3e46d 100644 --- a/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md +++ b/docs/framework/operators/machine-learning/tree-ensemble-classifier/tree_ensemble_classifier.predict.md @@ -1,7 +1,7 @@ # TreeEnsembleClassifier::predict ```rust - fn predict(ref self: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); ``` Tree Ensemble classifier. Returns the top class for each of N inputs. 
@@ -185,7 +185,7 @@ fn tree_ensemble_classifier_helper( fn test_tree_ensemble_classifier_multi_pt_softmax() -> (Span, MutMatrix::) { let (mut classifier, X) = tree_ensemble_classifier_helper(POST_TRANSFORM::SOFTMAX); - let (labels, scores) = TreeEnsembleClassifierTrait::predict(ref classifier, X); + let (labels, scores) = TreeEnsembleClassifierTrait::predict(classifier, X); (labels, scores) } diff --git a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md index 812115971..243bda558 100644 --- a/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md +++ b/docs/framework/operators/machine-learning/tree-ensemble-regressor/tree_ensemble_regressor.predict.md @@ -1,7 +1,7 @@ # TreeEnsembleRegressor::predict ```rust - fn predict(ref self: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); + fn predict(regressor: TreeEnsembleRegressor, X: Tensor) -> (Span, MutMatrix::); ``` Tree Ensemble regressor. Returns the regressed values for each input in N. 
@@ -160,7 +160,7 @@ fn tree_ensemble_regressor_helper( fn test_tree_ensemble_regressor_SUM() -> MutMatrix:: { let (mut regressor, X) = tree_ensemble_regressor_helper(AGGREGATE_FUNCTION::SUM); - let mut res = TreeEnsembleRegressorTrait::predict(ref regressor, X); + let mut res = TreeEnsembleRegressorTrait::predict(regressor, X); res } >>> diff --git a/docs/framework/operators/neural-network/nn.col2im.md b/docs/framework/operators/neural-network/nn.col2im.md index fd5e82ffa..6c7b1af05 100644 --- a/docs/framework/operators/neural-network/nn.col2im.md +++ b/docs/framework/operators/neural-network/nn.col2im.md @@ -1,4 +1,3 @@ - # NNTrait::col2im ```rust diff --git a/docs/framework/operators/neural-network/nn.max_pool.md b/docs/framework/operators/neural-network/nn.max_pool.md new file mode 100644 index 000000000..eb21f4c8c --- /dev/null +++ b/docs/framework/operators/neural-network/nn.max_pool.md @@ -0,0 +1,107 @@ + +# NNTrait::max_pool + +```rust + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, +) -> (Tensor, Option>); +``` + +MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. max pooling consisting of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized. + +## Args + +* `X`(`@Tensor`) - Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. 
For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. +* `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. +* `ceil_mode`(`Option`) - Default is 1, Whether to use ceil or floor (default) to compute the output shape. +* `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. +* `kernel_shape`(`Span`) - The size of the kernel along each axis. +* `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. +* `storage_order`(`Option`) - Default is 0, The storage order of the tensor. 0 is row major, and 1 is column major. +* `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. +* `output_len`(`Option`) - Default is 1, If set to 2, return the indices tensor. + +## Returns + +A `Tensor` that contains the result of the max pool. +A `Option>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. 
+## Examples + +```rust +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::nn::FP16x16NN; +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + + +fn example_max_pool() -> (Tensor, Option>) { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + return NNTrait::max_pool( + @X, + Option::None, + Option::None, + Option::None, + array![5, 5, 5].span(), + Option::Some(array![2, 2, 2, 2].span()), + Option::None, + 
Option::None, + 1 + ); + +} + +>>> ([ + [ + [ + [13, 14, 15, 15, 15], + [18, 19, 20, 20, 20], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + [23, 24, 25, 25, 25], + ] + ] + ], + Option::None) + + +```` diff --git a/nodegen/node/max_pool.py b/nodegen/node/max_pool.py new file mode 100644 index 000000000..9786209a2 --- /dev/null +++ b/nodegen/node/max_pool.py @@ -0,0 +1,1263 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +import numpy as np +import numpy as np + +from typing import Tuple, Union +from onnx.reference.ops._op_common_pool import CommonPool + + +def max_pool( + x, + auto_pad=None, + ceil_mode=None, + dilations=None, + kernel_shape=None, + pads=None, + storage_order=None, + strides=None, + output_len=None +): + if ( + dilations is not None + and (min(dilations) != max(dilations) or min(dilations) != 1) + ) or ( + strides is not None and (min(strides) != max(strides) or min(strides) != 1) + ): + return _max_pool( + x, + auto_pad=auto_pad, + ceil_mode=ceil_mode, + dilations=dilations, + kernel_shape=kernel_shape, + pads=pads, + storage_order=storage_order, + strides=strides, + output_len=output_len + ) + + return common_pool( + "MAX", + 0, + x, + auto_pad=auto_pad, + ceil_mode=ceil_mode, + dilations=dilations, + kernel_shape=kernel_shape, + pads=pads, + strides=strides, + p=1 + ) +def _max_pool( # type: ignore + + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_len +): + if pads is None: + pads = [0 for i in range(len(kernel_shape) * 2)] + if strides is None: + strides = [1 for i in range(len(kernel_shape))] + if dilations is None: + dilations = [1 for i in range(len(kernel_shape))] + n_dims = len(kernel_shape) + new_pads = np.array([(pads[i], pads[i + n_dims]) for i in range(n_dims)]) + input_spatial_shape = x.shape[2:] + output_spatial_shape = [0 for s in input_spatial_shape] + if ceil_mode: + for i in 
range(len(input_spatial_shape)): + output_spatial_shape[i] = int( + np.ceil( + ( + input_spatial_shape[i] + + new_pads[i].sum() + - ((kernel_shape[i] - 1) * dilations[i] + 1) + ) + / strides[i] + + 1 + ) + ) + need_to_reduce_out_size_in_ceil_mode = ( + output_spatial_shape[i] - 1 + ) * strides[i] >= input_spatial_shape[i] + new_pads[i][0] + if need_to_reduce_out_size_in_ceil_mode: + output_spatial_shape[i] -= 1 + else: + for i in range(len(input_spatial_shape)): + output_spatial_shape[i] = int( + np.floor( + ( + input_spatial_shape[i] + + new_pads[i].sum() + - ((kernel_shape[i] - 1) * dilations[i] + 1) + ) + / strides[i] + + 1 + ) + ) + if auto_pad and auto_pad != "NOTSET": + # Deprecated attribute + if auto_pad in ("SAME_UPPER", "SAME_LOWER"): + for i in range(len(input_spatial_shape)): + if auto_pad == "SAME_UPPER": + output_spatial_shape[i] = int( + np.ceil(input_spatial_shape[i] / strides[i]) + ) + else: + output_spatial_shape[i] = int( + np.floor(input_spatial_shape[i] / strides[i]) + ) + pad_i = ( + (output_spatial_shape[i] - 1) * strides[i] + + ((kernel_shape[i] - 1) * dilations[i] + 1) + - input_spatial_shape[i] + ) + new_pads[i, 0] = pad_i // 2 + new_pads[i, 1] = pad_i - new_pads[i, 0] + else: + for i in range(len(input_spatial_shape)): + output_spatial_shape[i] = int( + np.ceil( + ( + input_spatial_shape[i] + - ((kernel_shape[i] - 1) * dilations[i] + 1) + + 1 + ) + / strides[i] + ) + ) + if len(input_spatial_shape) == 1: + return _max_pool_1d( + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len + ) + if len(input_spatial_shape) == 2: + return _max_pool_2d( + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len + ) + if len(input_spatial_shape) == 3: + return _max_pool_3d( + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, 
+ output_len + ) + return _max_pool_nd( + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len + ) +def _max_pool_1d( # type: ignore + + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len +): + global_pooling = False + y_dims = x.shape[:2] + tuple(output_spatial_shape) + y = np.zeros(y_dims, dtype=x.dtype) + indices = np.full(y_dims, dtype=np.int64, fill_value=-1) + x_dims = x.shape + channels = x_dims[1] + height = x_dims[2] + pooled_height = y_dims[2] + total_channels = x_dims[0] * channels + stride_h = 1 if global_pooling else strides[0] + x_step = height + y_step = pooled_height + dilation_h = dilations[0] + X_data = x.ravel() + Y_data = y.ravel() + I_data = indices.ravel() + def iteration(c): + x_d = c * x_step + y_d = c * y_step + i_d = c * y_step + for ph in range(pooled_height): + hstart = ph * stride_h - new_pads[0, 0] + hend = hstart + kernel_shape[0] * dilation_h + Yh = None + h_index = -1 + for h in range(hstart, hend, dilation_h): + if h < 0 or h >= height: + continue + if Yh is None or X_data[x_d + h] > Yh: + Yh = X_data[x_d + h] + h_index = h + Y_data[y_d + ph] = Yh + I_data[i_d + ph] = c * x_step + h_index + for c in range(total_channels): + iteration(c) + if output_len == 1: # type: ignore + return (Y_data.reshape(y_dims),) + return (Y_data.reshape(y_dims), I_data.reshape(y_dims)) +def _max_pool_2d( # type: ignore + + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len +): + global_pooling = False + y_dims = x.shape[:2] + tuple(output_spatial_shape) + y = np.zeros(y_dims, dtype=x.dtype) + indices = np.full(y_dims, dtype=np.int64, fill_value=-1) + x_dims = x.shape + channels = x_dims[1] + height = x_dims[2] + width = x_dims[3] if len(kernel_shape) > 1 else 1 + pooled_height = y_dims[2] + pooled_width = 
y_dims[3] if len(kernel_shape) > 1 else 1 + total_channels = x_dims[0] * channels + stride_h = 1 if global_pooling else strides[0] + stride_w = 1 if global_pooling else strides[1] + x_step = height * width + y_step = pooled_height * pooled_width + dilation_h = dilations[0] + dilation_w = dilations[1] + X_data = x.ravel() + Y_data = y.ravel() + I_data = indices.ravel() + def iteration(c): # type: ignore + x_d = c * x_step # X_data + y_d = c * y_step # Y_data + for ph in range(pooled_height): + hstart = ph * stride_h - new_pads[0, 0] + hend = hstart + kernel_shape[0] * dilation_h + for pw in range(pooled_width): + wstart = pw * stride_w - new_pads[1, 0] + wend = wstart + kernel_shape[1] * dilation_w + + pool_index = ph * pooled_width + pw + Yh = None + h_index = -1 + w_index = -1 + for h in range(hstart, hend, dilation_h): + if h < 0 or h >= height: + continue + for w in range(wstart, wend, dilation_w): + if w < 0 or w >= width: + continue + input_index = h * width + w + if input_index < 0 or input_index > X_data.shape[0]: + continue + if Yh is None or X_data[x_d + input_index] > Yh: + Yh = X_data[x_d + input_index] + h_index = h + w_index = w + if Yh is None: + continue + Y_data[y_d + pool_index] = Yh + I_data[y_d + pool_index] = ( + c * x_step + h_index * width + w_index + if storage_order == 0 + else c * x_step + h_index + w_index * height + ) + for c in range(total_channels): + iteration(c) + if output_len == 1: # type: ignore + return (Y_data.reshape(y_dims),) + return (Y_data.reshape(y_dims), I_data.reshape(y_dims)) +def _max_pool_3d( # type: ignore + + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len +): + global_pooling = False + y_dims = x.shape[:2] + tuple(output_spatial_shape) + y = np.zeros(y_dims, dtype=x.dtype) + indices = np.full(y_dims, dtype=np.int64, fill_value=-1) + x_dims = x.shape + channels = x_dims[1] + height = x_dims[2] + width = x_dims[3] if 
len(kernel_shape) > 1 else 1 + depth = x_dims[4] if len(kernel_shape) > 2 else 1 + pooled_height = y_dims[2] + pooled_width = y_dims[3] if len(kernel_shape) > 1 else 1 + pooled_depth = y_dims[4] if len(kernel_shape) > 2 else 1 + total_channels = x_dims[0] * channels + stride_h = 1 if global_pooling else strides[0] + stride_w = 1 if global_pooling else strides[1] + stride_d = 1 if global_pooling else strides[2] + x_step = height * width * depth + y_step = pooled_height * pooled_width * pooled_depth + dilation_h = dilations[0] + dilation_w = dilations[1] + dilation_d = dilations[2] + X_data = x.ravel() + Y_data = y.ravel() + I_data = indices.ravel() + def iteration(c): + x_d = c * x_step + y_d = c * y_step + i_d = c * y_step + for ph in range(pooled_height): + hstart = ph * stride_h - new_pads[0, 0] + hend = hstart + kernel_shape[0] * dilation_h + for pw in range(pooled_width): + wstart = pw * stride_w - new_pads[1, 0] + wend = wstart + kernel_shape[1] * dilation_w + for pd in range(pooled_depth): + dstart = pd * stride_d - new_pads[2, 0] + dend = dstart + kernel_shape[2] * dilation_d + pool_index = ( + ph * pooled_width * pooled_depth + pw * pooled_depth + pd + ) + Yh = None + h_index = -1 + w_index = -1 + d_index = -1 + for h in range(hstart, hend, dilation_h): + if h < 0 or h >= height: + continue + for w in range(wstart, wend, dilation_w): + if w < 0 or w >= width: + continue + for d in range(dstart, dend, dilation_d): + if d < 0 or d >= depth: + continue + input_index = h * width * depth + w * depth + d + if Yh is None or X_data[x_d + input_index] > Yh: + Yh = X_data[x_d + input_index] + h_index = h + w_index = w + d_index = d + + + Y_data[y_d + pool_index] = Yh + I_data[i_d + pool_index] = ( + ( + c * x_step + + h_index * width * depth + + w_index * depth + + d_index + ) + if storage_order == 0 + else ( + c * x_step + + h_index + + w_index * height + + d_index * height * width + ) + ) + for c in range(total_channels): + iteration(c) + if output_len == 1: # 
type: ignore + return (Y_data.reshape(y_dims),) + return (Y_data.reshape(y_dims), I_data.reshape(y_dims)) +def stride(arr): + stride = np.zeros(len(arr)) + acc = 1 + for i in range(len(arr)): + stride[i] = acc + acc *= arr[-(i + 1)] + return np.flip(stride) +def reverse_stride(arr): + stride = np.zeros(len(arr)) + acc = 1 + for i in range(len(arr)): + acc *= arr[i] + stride[i] = acc + + return stride + + +def _max_pool_nd( # type: ignore + + x, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + new_pads, + storage_order, + strides, + output_spatial_shape, + output_len +): + nd = len(x.shape[2:]) + y_dims = x.shape[:2] + tuple(output_spatial_shape) + y = np.zeros(y_dims, dtype=x.dtype) + indices = np.full(y_dims, dtype=np.int64, fill_value=-1) + x_dims = x.shape + channels = x_dims[1] + x_stride = stride(x.shape) + y_stride = stride(y_dims) + total_channels = x_dims[0] * channels + x_step = x_stride[1] + y_step = y_stride[1] + X_data = x.ravel() + Y_data = y.ravel() + I_data = indices.ravel() + def iteration(c): + x_d = int(c * x_step) + y_d = int(c * y_step) + + for p in range(int(y_step)): + pool_index = p + flatten_index = p + + nstart = np.zeros(nd) + nend = np.zeros(nd) + nstep = np.zeros(nd) + + for n in range(nd): + pn, rem = divmod(flatten_index, y_stride[n + 2]) + flatten_index = rem + + ns = pn * strides[n] - new_pads[n, 0] + nstart[n] = ns + nend[n] = ns + kernel_shape[n] * dilations[n] + + nstep[n] = np.ceil((nend[n] - ns) / dilations[n]) + + nstride = stride(nstep) + max_iter = int(nstep[0] * nstride[0]) + n_index = np.full(y_dims, dtype=np.int64, fill_value=-1) + Yh = None + + for i in range(max_iter): + flatten_index = i + is_outside = False + input_index = 0 + + i_index = np.zeros(nd) + + for n in range(nd): + item, rem = divmod(flatten_index, nstride[n]) + flatten_index = rem + + item_ = item * dilations[n] + nstart[n] + if item_ < 0 or item_ >= x.shape[2 + n]: + is_outside = True + i_index[n] = item_ + input_index += item_ * x_stride[2 + n] + 
+ input_index = int(input_index) + if is_outside == False: + if input_index < 0 or input_index > X_data.shape[0]: + continue + if Yh is None or X_data[x_d + input_index] > Yh: + Yh = X_data[x_d + input_index] + n_index = i_index + + + Y_data[y_d + p] = Yh + + for c in range(total_channels): + iteration(c) + if output_len == 1: # type: ignore + return (Y_data.reshape(y_dims),) + return (Y_data.reshape(y_dims), I_data.reshape(y_dims)) + + + +import itertools +import math +from typing import Sequence, Tuple, Union +import numpy as np + + + +def get_pad_shape( + auto_pad: str, + input_spatial_shape: Sequence[int], + kernel_spatial_shape: Sequence[int], + strides_spatial: Sequence[int], + output_spatial_shape: Sequence[int], +) -> Sequence[int]: + spatial_dims = len(input_spatial_shape) + pad_shape = [0] * spatial_dims + strides_spatial = strides_spatial or [1] * spatial_dims + if auto_pad in ("SAME_UPPER", "SAME_LOWER"): + for i in range(spatial_dims): + pad_shape[i] = ( + (output_spatial_shape[i] - 1) * strides_spatial[i] + + kernel_spatial_shape[i] + - input_spatial_shape[i] + ) + elif auto_pad == "VALID": + pass + + return pad_shape +def get_pad_with_auto_pad(auto_pad: str, pad_shape: Sequence[int]) -> Sequence[int]: + spatial_dims = len(pad_shape) + if auto_pad == "SAME_UPPER": + pads = [pad_shape[i] // 2 for i in range(spatial_dims)] + [ + pad_shape[i] - pad_shape[i] // 2 for i in range(spatial_dims) + ] + elif auto_pad == "SAME_LOWER": + pads = [pad_shape[i] - pad_shape[i] // 2 for i in range(spatial_dims)] + [ + pad_shape[i] // 2 for i in range(spatial_dims) + ] + else: + pads = [0] * spatial_dims * 2 # no padding + return pads + +def get_output_shape_explicit_padding( + pads: Sequence[int], + input_spatial_shape: Sequence[int], + kernel_spatial_shape: Sequence[int], + strides_spatial: Sequence[int], + dilations: Union[Sequence[int], None] = None, + ceil_mode: bool = False, +) -> Tuple[Sequence[int], Sequence[int]]: + + output_spatial_shape = [0] * 
len(input_spatial_shape) + pads = pads or [0] * len(input_spatial_shape) * 2 + strides_spatial = strides_spatial or [1] * len(input_spatial_shape) + dims = len(input_spatial_shape) + if dilations is None: + dilations = np.ones([dims], dtype=np.int64) + + for dim in range(dims): + dim_size = ( + input_spatial_shape[dim] + + pads[dim] + + pads[dims + dim] + - dilations[dim] * (kernel_spatial_shape[dim] - 1) + - 1 + ) / strides_spatial[dim] + 1 + + if ceil_mode: + output_spatial_shape[dim] = int(np.ceil(dim_size)) + else: + output_spatial_shape[dim] = int(np.floor(dim_size)) + + pads_spatial_shape_new = pads[:] + for dim in range(dims): + sliding_window_size = (kernel_spatial_shape[dim] - 1) * dilations[dim] + 1 + actual_padded_input_size = (output_spatial_shape[dim] - 1) * strides_spatial[ + dim + ] + sliding_window_size + extra_pad = ( + actual_padded_input_size + - input_spatial_shape[dim] + - pads[dim] + - pads[dims + dim] + ) + if extra_pad > 0: + pads_spatial_shape_new[dim] += extra_pad // 2 + pads_spatial_shape_new[dims + dim] += extra_pad - extra_pad // 2 + + return output_spatial_shape, pads_spatial_shape_new + +def get_output_shape_auto_pad( + auto_pad: str, + input_spatial_shape: Sequence[int], + kernel_spatial_shape: Sequence[int], + strides_spatial: Sequence[int], +) -> Sequence[int]: + strides_spatial = strides_spatial or [1] * len(input_spatial_shape) + out_shape = [0] * len(input_spatial_shape) + for i in range(len(input_spatial_shape)): + if auto_pad in ("SAME_UPPER", "SAME_LOWER"): + out_shape[i] = ( + math.floor((input_spatial_shape[i] - 1) / strides_spatial[i]) + 1 + ) + elif auto_pad == "VALID": + out_shape[i] = ( + math.floor( + (input_spatial_shape[i] - kernel_spatial_shape[i]) + / strides_spatial[i] + ) + + 1 + ) + else: + raise ValueError( + "auto_pad can only be NOTSET, SAME_UPPER, SAME_LOWER, or VALID" + ) + + return out_shape + +def lp_pool(x: np.array, p: int) -> float: + y = 0 + for v in np.nditer(x): + y += abs(v) ** p + return y ** (1.0 
/ p) + +def pool( + padded: np.ndarray, + x_shape: Sequence[int], + kernel: Sequence[int], + strides: Sequence[int], + out_shape: Sequence[int], + pooling_type: str, + pads: Union[Sequence[int], None] = None, + dilations: Union[Sequence[int], None] = None, + count_include_pad: int = 0, + p: int = 1, +) -> np.ndarray: + spatial_size = len(x_shape) - 2 + y = np.zeros([x_shape[0], x_shape[1], *list(out_shape)], dtype=padded.dtype) + if dilations is None: + dilations = np.ones([spatial_size], dtype=np.int64) + if pads is None: + pads = np.zeros([spatial_size * 2], dtype=np.int64) + elif len(pads) == 1: + pads = pads * spatial_size * 2 + strides = strides or [1] * spatial_size + + def lp_pool_p(x): + return lp_pool(x, p) + + + + for shape in itertools.product( + range(x_shape[0]), + range(x_shape[1]), + *[ + range( + int( + ( + x_shape[i + 2] + + pads[i] + + pads[i + spatial_size] + - (1 + (kernel[i] - 1) * dilations[i]) + ) + / strides[i] + + 1 + ) + ) + for i in range(spatial_size) + ], + ): + window = padded[shape[0], shape[1]] + window_vals = np.array( + [ + window[i] + for i in list( + itertools.product( + *[ + range( + strides[i] * shape[i + 2], + strides[i] * shape[i + 2] + + (1 + (kernel[i] - 1) * dilations[i]), + dilations[i], + ) + for i in range(spatial_size) + ] + ) + ) + ] + ) + if pooling_type == "AVG": + f = np.average + elif pooling_type == "MAX": + f = np.max + elif pooling_type == "LPPOOL": + f = lp_pool_p + else: + raise NotImplementedError( + f"Pooling type {pooling_type} does not support. 
Should be AVG, MAX" + ) + + if count_include_pad == 1 and (pooling_type in {"AVG", "LPPOOL"}): + y[shape] = f(window_vals) + else: + y[shape] = f(window_vals[np.where(~np.isnan(window_vals))]) + return y + + +def common_pool( + pooling_type, + count_include_pad, + x, + auto_pad=None, + ceil_mode=None, + dilations=None, + kernel_shape=None, + pads=None, + strides=None, + p=None, +): + x_shape = np.shape(x) + pading_value = np.nan if pooling_type == "MAX" or count_include_pad == 0 else 0 + if auto_pad in ["SAME_UPPER", "SAME_LOWER", "VALID"]: + assert ( + ceil_mode is None or ceil_mode == 0 + ), "ceil_mode is not supported with auto_pad" + out_shape = get_output_shape_auto_pad( + auto_pad, x.shape[2:], kernel_shape, strides + ) + pads_shape = get_pad_shape( + auto_pad, x_shape[2:], kernel_shape, strides, out_shape + ) + pads = get_pad_with_auto_pad(auto_pad, pads_shape) + n_dims = len(pads) // 2 + pads_np = [(pads[i], pads[i + n_dims]) for i in range(n_dims)] + padded = np.pad( + x, + ((0, 0), (0, 0), *pads_np), + mode="constant", + constant_values=pading_value, + ) + y = pool( + padded, + x_shape, + kernel_shape, + strides, + out_shape, + pooling_type, + pads, + dilations, + count_include_pad, + p, + ) + return (y,) + else: + out_shape, pads = get_output_shape_explicit_padding( + pads, x_shape[2:], kernel_shape, strides, dilations, ceil_mode + ) + # convert pads from [x1_begin, x2_begin,...,x1_end, x2_end,...] to [(x1_begin, x1_end), (x2_begin, x2_end),...] 
+ n_dims = len(pads) // 2 + pads_np = [(pads[i], pads[i + n_dims]) for i in range(n_dims)] + padded = np.pad( + x, + ((0, 0), (0, 0), *pads_np), + mode="constant", + constant_values=pading_value, + ) + y = pool( + padded, + x_shape, + kernel_shape, + strides, + out_shape, + pooling_type, + pads, + dilations, + count_include_pad, + p, + ) + return (y,) + +class Max_pool(RunAll): + + @staticmethod + def export_maxpool_1d() -> None: + + x = np.random.randn(1, 3, 32).astype(np.float32) + kernel_shape = np.array([2]) + strides = np.array([2]) + padded = x + y = max_pool(padded, kernel_shape=kernel_shape, strides=strides,output_len=1) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "maxpool_1d" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2].span())," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_1d_default() -> None: + + x = np.random.randn(1, 3, 32).astype(np.float32) + kernel_shape = np.array([2]) + padded = x + y = max_pool(padded, kernel_shape=kernel_shape,output_len=1) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "maxpool_1d_default" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + + @staticmethod + def 
export_maxpool_2d() -> None: + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + + kernel_shape=(2, 2) + strides=(2, 2) + padded = x + y = max_pool(padded,strides = strides,kernel_shape=kernel_shape,output_len=1) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "maxpool_2d" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_default() -> None: + x = np.random.randn(1, 3, 8, 8).astype(np.float32) + kernel_shape = (2, 2) + padded = x + y = max_pool(padded, kernel_shape=kernel_shape, output_len=1) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_2d_default" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + + def export_maxpool_2d_pads_default() -> None: + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.uint8) + kernel_shape=(5, 5) + pads=(2, 2, 2, 2) + padded = x + y = max_pool(padded,pads = 
pads,kernel_shape=kernel_shape,output_len=1) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_2d_pads_default" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![5, 5].span()," + func_sig += "Option::Some(array![2, 2, 2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_constraint_index() -> None: + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[7, 9], [17, 19]]]]).astype(np.float32) + z = np.array([[[[6, 16], [8, 18]]]]).astype(np.int64) + + kernel_shape=(2, 2) + strides=(2, 2) + padded = x + (y, z) = max_pool(padded,strides = strides,kernel_shape=kernel_shape,output_len=2, storage_order=1) + + y = np.array(y) + z = np.array(z) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + z = Tensor(Dtype.I32, z.shape, z.flatten()) + + + name = "maxpool_2d_constraint_index" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::Some(1)," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "1)" + make_test( + [x], z, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_same_upper() -> None: + x = np.array( + [ + [ + [ + [1, 2, 3, 4, 5], + [6, 7, 8, 9, 10], + [11, 12, 13, 14, 15], + [16, 17, 18, 19, 20], + [21, 22, 23, 24, 25], + 
] + ] + ] + ).astype(np.float32) + + kernel_shape=(3, 3) + strides=(2, 2) + padded = x + y = max_pool(padded,strides = strides,kernel_shape=kernel_shape,auto_pad="SAME_UPPER") + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + + name = "maxpool_2d_same_upper" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::Some(AUTO_PAD::SAME_UPPER)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![3, 3].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_same_upper_default() -> None: + x = np.random.randn(1, 3, 8, 8).astype(np.float32) + kernel_shape = (2, 2) + padded = x + y = max_pool(padded,auto_pad="SAME_UPPER", kernel_shape=kernel_shape, output_len=1) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_2d_same_upper_default" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::Some(AUTO_PAD::SAME_UPPER)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_same_lower_default() -> None: + x = np.random.randn(1, 3, 8, 8).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2) + padded = x + y = max_pool(padded,auto_pad="SAME_LOWER", kernel_shape=kernel_shape, output_len=1) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = 
Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_2d_same_lower_default" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::Some(AUTO_PAD::SAME_LOWER)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "array![2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_ceil() -> None: + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32) + + kernel_shape = (3, 3) + strides = (2, 2) + padded = x + y = max_pool(padded,strides = strides, ceil_mode = True,kernel_shape=kernel_shape, output_len=1) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_2d_ceil" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::Some(1)," + func_sig += "Option::None," + func_sig += "array![3, 3].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_2d_dilations() -> None: + x = np.array( + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ] + ] + ] + ).astype(np.float32) + + kernel_shape = (2 , 2) + dilations = (2, 2) + padded = x + y = max_pool(padded,dilations = dilations, ceil_mode = True,kernel_shape=kernel_shape, output_len=1) + y = np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) 
+ + + name = "maxpool_2d_dilations" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "array![2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_3d_dilations() -> None: + + x = np.array( + [ + [ + [ + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], + ], + ] + ] + ] + ).astype(np.float32) + kernel_shape=(2, 2, 2) + strides=(1, 1, 1) + dilations=(2, 2, 2) + padded = x + y = max_pool(padded, dilations=dilations, kernel_shape=kernel_shape, strides=strides,output_len=1) + y = np.array(y[0]) + + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_3d_dilations" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2, 2].span())," + func_sig += "array![2, 2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1, 1].span())," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + @staticmethod + def export_maxpool_4d_dilations() -> None: + x = np.random.randn(1, 3, 4, 4, 4, 4).astype(np.float32) + x_shape = np.shape(x) + kernel_shape = (2, 2, 2, 2) + strides = (1, 1, 1, 1) + dilations = (2, 2, 2, 2) + padded = x + y = max_pool(padded,dilations = dilations, ceil_mode = True,kernel_shape=kernel_shape, output_len=1) + y = 
np.array(y[0]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "maxpool_4d_dilations" + func_sig = "NNTrait::max_pool(" + func_sig += "@input_0," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2, 2, 2].span())," + func_sig += "array![2, 2, 2, 2].span()," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "1)" + make_test( + [x], y, func_sig, name, Trait.NN) + + + + + \ No newline at end of file diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo index c9edfd51c..4cc3c3134 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_classifier.cairo @@ -240,7 +240,9 @@ trait TreeEnsembleClassifierTrait { /// ]) /// ``` /// - fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::); + fn predict( + classifier: TreeEnsembleClassifier, X: Tensor + ) -> (Span, MutMatrix::); } impl TreeEnsembleClassifierImpl< @@ -259,7 +261,9 @@ impl TreeEnsembleClassifierImpl< +Div, +Mul > of TreeEnsembleClassifierTrait { - fn predict(classifier: TreeEnsembleClassifier, X: Tensor) -> (Span, MutMatrix::) { + fn predict( + classifier: TreeEnsembleClassifier, X: Tensor + ) -> (Span, MutMatrix::) { let mut classifier = classifier; let leaves_index = classifier.ensemble.leave_index_tree(X); let n_classes = classifier.classlabels.len(); diff --git a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo index 136af9aa0..3f9998e96 100644 --- a/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo +++ b/src/operators/ml/tree_ensemble/tree_ensemble_regressor.cairo @@ -299,12 +299,18 @@ impl TreeEnsembleRegressorImpl< let mut 
t_index = t_index.span(); match regressor.aggregate_function { - AGGREGATE_FUNCTION::SUM => { compute_res_SUM(ref regressor, ref res, ref t_index, i); }, + AGGREGATE_FUNCTION::SUM => { + compute_res_SUM(ref regressor, ref res, ref t_index, i); + }, AGGREGATE_FUNCTION::AVERAGE => { compute_res_AVERAGE(ref regressor, ref res, ref t_index, n_trees, i); }, - AGGREGATE_FUNCTION::MIN => { compute_res_MIN(ref regressor, ref res, ref t_index, i); }, - AGGREGATE_FUNCTION::MAX => { compute_res_MAX(ref regressor, ref res, ref t_index, i); }, + AGGREGATE_FUNCTION::MIN => { + compute_res_MIN(ref regressor, ref res, ref t_index, i); + }, + AGGREGATE_FUNCTION::MAX => { + compute_res_MAX(ref regressor, ref res, ref t_index, i); + }, }; i += 1; }; diff --git a/src/operators/nn.cairo b/src/operators/nn.cairo index 625e63216..42755a102 100644 --- a/src/operators/nn.cairo +++ b/src/operators/nn.cairo @@ -1,6 +1,9 @@ mod core; mod implementations; mod functional; +mod common; + +use orion::operators::nn::common::{AUTO_PAD, POOLING_TYPE}; use orion::operators::nn::core::NNTrait; diff --git a/src/operators/nn/common.cairo b/src/operators/nn/common.cairo new file mode 100644 index 000000000..d10ad8430 --- /dev/null +++ b/src/operators/nn/common.cairo @@ -0,0 +1,14 @@ +#[derive(Copy, Drop)] +enum AUTO_PAD { + NOTSET, + SAME_UPPER, + SAME_LOWER, + VALID +} + +#[derive(Copy, Drop)] +enum POOLING_TYPE { + AVG, + LPPOOL, + MAX, +} diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 93f9242c0..032880942 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -1,4 +1,5 @@ use orion::operators::tensor::core::Tensor; +use orion::operators::nn::AUTO_PAD; /// Trait /// @@ -1304,4 +1305,124 @@ trait NNTrait { mode: Option, padding_mode: Option, ) -> Tensor; + /// + /// # NNTrait::max_pool + /// + /// ```rust + /// fn max_pool( + /// X: @Tensor, + /// auto_pad: Option, + /// ceil_mode: Option, + /// dilations: Option>, + /// kernel_shape: Span, + /// 
pads: Option>, + /// storage_order: Option, + /// strides: Option>, + /// output_len: usize, + /// ) -> (Tensor, Option>); + /// ``` + /// + /// MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. max pooling consisting of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized. + /// + /// ## Args + /// + /// * `X`(`@Tensor`) - Input data tensor from the previous operator; dimensions for image case are (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. For non image case, the dimensions are in the form of (N x C x D1 x D2 ... Dn), where N is the batch size. + /// * `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. + /// * `ceil_mode`(`Option`) - Default is 1, Whether to use ceil or floor (default) to compute the output shape. + /// * `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. + /// * `kernel_shape`(`Span`) - The size of the kernel along each axis. + /// * `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. 
`pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. + /// * `storage_order`(`Option`) - Default is 0, The storage order of the tensor. 0 is row major, and 1 is column major. + /// * `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + /// * `output_len`(`Option`) - Default is 1, If set to 2, return the indices tensor. + /// + /// ## Returns + /// + /// A `Tensor` that contains the result of the max pool. + /// A `Option>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. + /// ## Examples + /// + /// ```rust + /// use orion::operators::nn::NNTrait; + /// use orion::numbers::FixedTrait; + /// use orion::operators::nn::FP16x16NN; + /// use orion::numbers::FP16x16; + /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; + /// + /// + /// fn example_max_pool() -> (Tensor, Option>) { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(5); + /// shape.append(5); + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 393216, sign: false }); + /// data.append(FP16x16 { mag: 458752, sign: false }); + /// data.append(FP16x16 { mag: 524288, sign: false }); + /// data.append(FP16x16 { mag: 589824, sign: false }); + /// data.append(FP16x16 { mag: 655360, sign: 
false }); + /// data.append(FP16x16 { mag: 720896, sign: false }); + /// data.append(FP16x16 { mag: 786432, sign: false }); + /// data.append(FP16x16 { mag: 851968, sign: false }); + /// data.append(FP16x16 { mag: 917504, sign: false }); + /// data.append(FP16x16 { mag: 983040, sign: false }); + /// data.append(FP16x16 { mag: 1048576, sign: false }); + /// data.append(FP16x16 { mag: 1114112, sign: false }); + /// data.append(FP16x16 { mag: 1179648, sign: false }); + /// data.append(FP16x16 { mag: 1245184, sign: false }); + /// data.append(FP16x16 { mag: 1310720, sign: false }); + /// data.append(FP16x16 { mag: 1376256, sign: false }); + /// data.append(FP16x16 { mag: 1441792, sign: false }); + /// data.append(FP16x16 { mag: 1507328, sign: false }); + /// data.append(FP16x16 { mag: 1572864, sign: false }); + /// data.append(FP16x16 { mag: 1638400, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// return NNTrait::max_pool( + /// @X, + /// Option::None, + /// Option::None, + /// Option::None, + /// array![5, 5, 5].span(), + /// Option::Some(array![2, 2, 2, 2].span()), + /// Option::None, + /// Option::None, + /// 1 + /// ); + /// + /// } + /// + /// >>> ([ + /// [ + /// [ + /// [13, 14, 15, 15, 15], + /// [18, 19, 20, 20, 20], + /// [23, 24, 25, 25, 25], + /// [23, 24, 25, 25, 25], + /// [23, 24, 25, 25, 25], + /// ] + /// ] + /// ], + /// Option::None) + /// + /// + /// ```` + /// + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>); } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index 45e1c1ec9..f02570148 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -16,3 +16,5 @@ mod conv_transpose; mod depth_to_space; mod space_to_depth; mod conv; +mod max_pool; +mod common_pool; diff --git 
a/src/operators/nn/functional/common_pool.cairo b/src/operators/nn/functional/common_pool.cairo new file mode 100644 index 000000000..02d8826ce --- /dev/null +++ b/src/operators/nn/functional/common_pool.cairo @@ -0,0 +1,945 @@ +use core::clone::Clone; +use core::option::OptionTrait; +use core::array::ArrayTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, I32Tensor}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; +use core::debug::PrintTrait; +use core::traits::Into; +use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; +use orion::numbers::FP16x16; +use orion::operators::nn::{AUTO_PAD, POOLING_TYPE}; + + +fn common_pool< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Into, + +Rem, + +Neg, + +SubEq, +>( + pooling_type: POOLING_TYPE, + count_include_pad: usize, + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + strides: Option>, + p: usize, +) -> (Tensor, Option>) { + let padding_value: T = match pooling_type { + POOLING_TYPE::AVG => { + let padding_value = if count_include_pad == 0 { + NumberTrait::min_value() + } else { + NumberTrait::zero() + }; + padding_value + }, + POOLING_TYPE::LPPOOL => { + let padding_value = if count_include_pad == 0 { + NumberTrait::min_value() + } else { + NumberTrait::zero() + }; + padding_value + }, + POOLING_TYPE::MAX => { NumberTrait::min_value() }, + }; + + let ceil_mode = match ceil_mode { + Option::Some(ceil_mode) => { ceil_mode }, + Option::None => { 0 }, + }; + + let auto_pad = match auto_pad { + Option::Some(auto_pad) => auto_pad, + Option::None => AUTO_PAD::NOTSET, + }; + + let (out_shape, pads, padded) = match auto_pad { + AUTO_PAD::NOTSET => { + let (out_shape, pads) = get_output_shape_explicit_padding( + pads, 
+ SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides, + dilations, + ceil_mode + ); + let padded = pad_constant_value(X, padding_value, pads); + (out_shape, pads, padded) + }, + AUTO_PAD::SAME_UPPER => { + assert(ceil_mode == 0, 'ceil mode not supp with autopad'); + let out_shape = get_output_shape_auto_pad( + auto_pad, + SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides + ); + let pads_shape = get_pad_shape( + auto_pad, + SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides, + out_shape + ); + + let pads = get_pad_with_auto_pad(auto_pad, pads_shape); + + let padded = pad_constant_value(X, padding_value, pads); + (out_shape, pads, padded) + }, + AUTO_PAD::SAME_LOWER => { + assert(ceil_mode == 0, 'ceil mode not supp with autopad'); + let out_shape = get_output_shape_auto_pad( + auto_pad, + SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides + ); + let pads_shape = get_pad_shape( + auto_pad, + SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides, + out_shape + ); + + let pads = get_pad_with_auto_pad(auto_pad, pads_shape); + + let padded = pad_constant_value(X, padding_value, pads); + (out_shape, pads, padded) + }, + AUTO_PAD::VALID => { + assert(ceil_mode == 0, 'ceil mode not supp with autopad'); + let out_shape = get_output_shape_auto_pad( + auto_pad, + SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides + ); + let pads_shape = get_pad_shape( + auto_pad, + SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2), + kernel_shape, + strides, + out_shape + ); + + let pads = get_pad_with_auto_pad(auto_pad, pads_shape); + + let padded = pad_constant_value(X, padding_value, pads); + (out_shape, pads, padded) + }, + }; + + return ( + pool( + @padded, + (*X).shape, + kernel_shape, + strides, + out_shape, + pooling_type, + pads, + dilations, + count_include_pad, + p, + ), + Option::None + ); +} + + +fn 
pool< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Into, + +Rem, + +Neg, + +SubEq, +>( + padded: @Tensor, + x_shape: Span, + kernel: Span, + strides: Option>, + out_shape: Span, + pooling_type: POOLING_TYPE, + pads: Span, + dilations: Option>, + count_include_pad: usize, + p: usize, +) -> Tensor { + let n_dims = x_shape.len() - 2; + let mut y = NullableVecImpl::new(); + + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + let mut y_shape = array![*x_shape.at(0), *x_shape.at(1)]; + let mut i = 0; + loop { + if i == n_dims { + break; + } + let a: T = NumberTrait::new_unscaled( + (*x_shape.at(i + 2) + *pads.at(i) + *pads.at(i + n_dims)).into(), false + ); + let b: T = NumberTrait::new_unscaled( + ((1 + (*kernel.at(i) - 1) * *dilations.at(i))).into(), false + ); + let c: T = NumberTrait::new_unscaled((*strides.at(i)).into(), false); + y_shape.append(NumberTrait::floor(((a - b) / c + NumberTrait::one())).try_into().unwrap()); + i += 1; + }; + let y_stride = stride(y_shape.span()); + let padded_stride = stride(*padded.shape); + let mut all_coords = get_all_coords(y_shape.span()); + + loop { + match all_coords.pop_front() { + Option::Some(coord) => { + let coord = *coord; + let window = SpanTrait::slice( + *padded.data, + *coord.at(0) * *padded_stride.at(0) + *coord.at(1) * *padded_stride.at(1), + *padded_stride.at(1) + ); + let window_stride = SpanTrait::slice(padded_stride, 2, n_dims); + let 
mut window_vals = ArrayTrait::new(); + + let mut all_indices = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == n_dims { + break; + } + let start = *strides.at(i) * *coord.at(i + 2); + let end = start + 1 + (*kernel.at(i) - 1) * *dilations.at(i); + let step = *dilations.at(i); + + all_indices.append(arange(start, end, step)); + + i += 1; + }; + + let mut all_indices = cartesian(all_indices.span()); + + loop { + match all_indices.pop_front() { + Option::Some(index) => { + let flatten_index = flatten_index((*index), window_stride); + + window_vals.append(*window.at(flatten_index)); + }, + Option::None => { break; } + } + }; + match pooling_type { + POOLING_TYPE::AVG => { + let flatten_index = flatten_index(coord, y_stride); + + if count_include_pad == 1 { + y.set(flatten_index, average(window_vals.span())); + } else { + y.set(flatten_index, average(window_vals.span())); + } + }, + POOLING_TYPE::LPPOOL => { panic(array!['supported soon']) }, + POOLING_TYPE::MAX => { + let flatten_index = flatten_index(coord, y_stride); + + y.set(flatten_index, max(window_vals.span())); + } + } + }, + Option::None => { break; }, + } + }; + let mut y_data = ArrayTrait::new(); + let mut i = 0; + loop { + if i == y.len() { + break; + } + y_data.append(y.at(i)); + i += 1; + }; + return TensorTrait::new(y_shape.span(), y_data.span()); +} + +fn get_output_shape_auto_pad( + auto_pad: AUTO_PAD, + input_spatial_shape: Span, + kernel_spatial_shape: Span, + strides_spatial: Option>, +) -> Span { + let n_dims = input_spatial_shape.len(); + let mut out_shape = ArrayTrait::new(); + + let strides_spatial = match strides_spatial { + Option::Some(strides_spatial) => strides_spatial, + Option::None => { + let mut strides_spatial = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + strides_spatial.append(1); + i += 1; + }; + strides_spatial.span() + }, + }; + + match auto_pad { + AUTO_PAD::NOTSET => { panic(array!['not supported!']) }, + AUTO_PAD::SAME_UPPER => { + let 
mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + out_shape.append((*input_spatial_shape.at(i) - 1) / *strides_spatial.at(i) + 1); + i += 1; + }; + out_shape.span() + }, + AUTO_PAD::SAME_LOWER => { + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + out_shape.append((*input_spatial_shape.at(i) - 1) / *strides_spatial.at(i) + 1); + i += 1; + }; + out_shape.span() + }, + AUTO_PAD::VALID => { + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + out_shape + .append( + (*input_spatial_shape.at(i) - *kernel_spatial_shape.at(i)) + / *strides_spatial.at(i) + + 1 + ); + i += 1; + }; + out_shape.span() + }, + } +} + +fn get_output_shape_explicit_padding( + pads: Option>, + input_spatial_shape: Span, + kernel_spatial_shape: Span, + strides_spatial: Option>, + dilations: Option>, + ceil_mode: usize, +) -> (Span, Span) { + let n_dims = input_spatial_shape.len(); + let pads = match pads { + Option::Some(pads) => pads, + Option::None => { + let mut pads = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + }; + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + let strides_spatial = match strides_spatial { + Option::Some(strides_spatial) => strides_spatial, + Option::None => { + let mut strides_spatial = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + strides_spatial.append(1); + i += 1; + }; + strides_spatial.span() + }, + }; + let mut output_spatial_shape = ArrayTrait::::new(); + + let mut d = 0; + loop { + if d == n_dims { + break; + } + let dim_num: FP16x16 = NumberTrait::new_unscaled( + (*input_spatial_shape.at(d) + + *pads.at(d) + + *pads.at(d + n_dims) + - 
*dilations.at(d) * (*kernel_spatial_shape.at(d) - 1) + - 1) + .into(), + false + ); + let dim_den = NumberTrait::new_unscaled((*strides_spatial.at(d) + 1).into(), false); + + let dim_size = dim_num / dim_den; + + let oss = if ceil_mode == 1 { + NumberTrait::ceil(dim_size) + } else { + NumberTrait::floor(dim_size) + }; + output_spatial_shape.append(oss.try_into().unwrap()); + + d += 1; + }; + let output_spatial_shape = output_spatial_shape.span(); + + let mut pads_spatial_shape_new_1 = ArrayTrait::new(); + let mut pads_spatial_shape_new_2 = ArrayTrait::new(); + + let mut d = 0; + loop { + if d == n_dims { + break; + } + let sliding_window_size = (*kernel_spatial_shape.at(d) - 1) * *dilations.at(d) + 1; + let actual_padded_input_size = (*output_spatial_shape.at(d) - 1) * *strides_spatial.at(d) + + sliding_window_size; + let extra_pad_sub = I32Number::new( + (*input_spatial_shape.at(d) + *pads.at(d) + *pads.at(d + n_dims)).into(), false + ); + let extra_pad = I32Number::new((actual_padded_input_size).into(), false) - extra_pad_sub; + + if extra_pad > 0 { + pads_spatial_shape_new_1.append(*pads.at(d) + extra_pad.into() / 2); + pads_spatial_shape_new_2.append(*pads.at(d) + extra_pad.into() - extra_pad.into() / 2); + } else { + pads_spatial_shape_new_1.append(*pads.at(d)); + pads_spatial_shape_new_2.append(*pads.at(d + n_dims)); + }; + d += 1; + }; + + let mut pads_spatial_shape_new = ArrayTrait::new(); + pads_spatial_shape_new.append_span(pads_spatial_shape_new_1.span()); + pads_spatial_shape_new.append_span(pads_spatial_shape_new_2.span()); + + return (output_spatial_shape, pads_spatial_shape_new.span()); +} + + +fn get_pad_shape( + auto_pad: AUTO_PAD, + input_spatial_shape: Span, + kernel_spatial_shape: Span, + strides_spatial: Option>, + output_spatial_shape: Span, +) -> Span { + let spatial_dims = input_spatial_shape.len(); + let mut pad_shape = ArrayTrait::new(); + + let strides_spatial = match strides_spatial { + Option::Some(strides_spatial) => strides_spatial, + 
Option::None => { + let mut strides_spatial = ArrayTrait::new(); + let mut i = 0; + loop { + if i == spatial_dims { + break; + } + strides_spatial.append(1); + i += 1; + }; + strides_spatial.span() + }, + }; + + match auto_pad { + AUTO_PAD::NOTSET => { panic(array!['not supported!']) }, + AUTO_PAD::SAME_UPPER => { + let mut i = 0; + loop { + if i == spatial_dims { + break; + } + pad_shape + .append( + (*output_spatial_shape.at(i) - 1) * *strides_spatial.at(i) + + *kernel_spatial_shape.at(i) + - *input_spatial_shape.at(i) + ); + i += 1; + }; + pad_shape.span() + }, + AUTO_PAD::SAME_LOWER => { + let mut i = 0; + loop { + if i == spatial_dims { + break; + } + pad_shape + .append( + (*output_spatial_shape.at(i) - 1) * *strides_spatial.at(i) + + *kernel_spatial_shape.at(i) + - *input_spatial_shape.at(i) + ); + i += 1; + }; + pad_shape.span() + }, + AUTO_PAD::VALID => { + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + pad_shape.append(0); + i += 1; + }; + pad_shape.span() + }, + } +} + + +fn get_pad_with_auto_pad(auto_pad: AUTO_PAD, mut pad_shape: Span,) -> Span { + let spatial_dims = pad_shape.len(); + let mut pads = ArrayTrait::new(); + + match auto_pad { + AUTO_PAD::NOTSET => { array![].span() }, + AUTO_PAD::SAME_UPPER => { + let mut pads_1 = ArrayTrait::new(); + let mut pads_2 = ArrayTrait::new(); + + loop { + match pad_shape.pop_front() { + Option::Some(v) => { + pads_1.append(*v / 2); + pads_2.append(*v - *v / 2); + }, + Option::None => { + pads.append_span(pads_1.span()); + pads.append_span(pads_2.span()); + break pads.span(); + } + } + } + }, + AUTO_PAD::SAME_LOWER => { + let mut pads_1 = ArrayTrait::new(); + let mut pads_2 = ArrayTrait::new(); + + loop { + match pad_shape.pop_front() { + Option::Some(v) => { + pads_1.append(*v - *v / 2); + pads_2.append(*v / 2); + }, + Option::None => { + pads.append_span(pads_1.span()); + pads.append_span(pads_2.span()); + break pads.span(); + } + } + } + }, + AUTO_PAD::VALID => { + let mut i = 0; 
+ loop { + if i == spatial_dims { + break; + } + pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + } +} + +// X dimension : N x C x d1 x ... x dn, Padding on dimensions d1, ..., dn +fn pad_constant_value< + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +PrintTrait +>( + mut X: @Tensor, constant_value: T, pads: Span +) -> Tensor { + let n_dims = pads.len() / 2; + let N = *(*X).shape.at(0); + let C = *(*X).shape.at(1); + + let mut padded_shape = array![N, C]; + + let mut i = 0; + loop { + if i == n_dims { + break; + } + padded_shape.append(*(*X).shape.at(i + 2) + *pads.at(i) + *pads.at(i + n_dims)); + i += 1; + }; + let x_stride = stride((*X).shape); + let padded_stride = stride(padded_shape.span()); + + let window_len = *x_stride.at(1); + let full_len = *padded_shape.at(0) * *padded_stride.at(0); + + let mut x_padded = full(full_len, constant_value); + + let total_channel = N * C; + + let mut c = 0; + loop { + if c == total_channel { + break; + } + + let mut i = 0; + loop { + if i == window_len { + break; + } + let mut padded_index = c * *padded_stride.at(1); + let mut flatten_index = i; + + let mut n = 0; + loop { + if n == n_dims { + break; + } + let (ind, rem) = DivRem::div_rem( + flatten_index, (*x_stride.at(2 + n)).try_into().unwrap() + ); + flatten_index = rem; + padded_index += (ind + *pads.at(n)) * *padded_stride.at(2 + n); + n += 1; + }; + + x_padded.set(padded_index, *(*X).data.at(c * window_len + i)); + i += 1; + }; + c += 1; + }; + + let mut padded = ArrayTrait::new(); + let mut i = 0; + loop { + if i == x_padded.len() { + break; + } + padded.append(x_padded.at(i)); + i += 1; + }; + return TensorTrait::new(padded_shape.span(), padded.span()); +} + + +// return a span of len ceil((end - start) / step) +fn full, +NumberTrait, +Copy, +Drop,>( + len: usize, fill_value: T +) -> NullableVec { + let mut full = NullableVecImpl::new(); + let mut i = 0; + loop { + if i == len { + break; + } + full.set(i, fill_value); + i += 1; + }; + return 
full; +} + + +fn flatten_index(index: Span, stride: Span) -> usize { + let mut flatten_index = 0; + let n = index.len(); + + let mut i = 0; + loop { + if i == n { + break; + } + flatten_index += *index.at(i) * *stride.at(i); + i += 1; + }; + + return flatten_index; +} + + +fn get_all_coords(shape: Span) -> Span> { + let mut all_indices = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == shape.len() { + break; + } + all_indices.append(arange(0, *shape.at(i), 1)); + i += 1; + }; + + return cartesian(all_indices.span()); +} + +fn cartesian(mut arrays: Span>,) -> Span> { + let mut n = 1; + let mut i = arrays.len() - 1; + loop { + n = n * (*(arrays.at(i))).len(); + if i == 0 { + break; + } + i -= 1; + }; + + let mut i = 0; + let mut size_arrays = ArrayTrait::new(); + loop { + if i == arrays.len() { + break; + } + size_arrays.append((*(arrays.at(i))).len()); + + i += 1; + }; + let size_arrays = size_arrays.span(); + let mut output_arrays = ArrayTrait::>::new(); + let mut m = n; + + let mut i = 0; + loop { + if i == arrays.len() { + break; + } + m = m / (*(arrays.at(i))).len(); + let mut out = repeat(*(arrays.at(i)), m); + out = repeat_2(out, size_arrays, i); + + output_arrays.append(out); + i += 1; + }; + let output_arrays = output_arrays.span(); + + let mut i = 0; + let mut ret = ArrayTrait::new(); + loop { + if i == n { + break; + } + let mut j = 0; + let mut x = ArrayTrait::new(); + loop { + if j == arrays.len() { + break; + } + + x.append(*(output_arrays.at(j)).at(i)); + j += 1; + }; + ret.append(x.span()); + i += 1; + }; + + return ret.span(); +} + + +fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { + let mut size = array.len(); + let mut i = 0; + loop { + if i == index { + break; + } + let mut j = 1; + loop { + if j == *size_array.at(index - 1 - i) { + break; + } + let mut k = 0; + loop { + if k == size { + break; + } + array.append(*array.at(k)); + k += 1; + }; + j += 1; + }; + size = size * *size_array.at(index - 1 - i); + i += 1; + 
}; + array +} + +fn repeat(array: Span, m: usize,) -> Array { + let mut out = ArrayTrait::new(); + let mut j = 0; + loop { + if j == array.len() { + break; + } + let mut k = 0; + loop { + if k == m { + break; + } + out.append(*array.at(j)); + k += 1; + }; + j += 1; + }; + + out +} + + +fn arange(start: usize, end: usize, step: usize) -> Span { + let mut arr = ArrayTrait::new(); + let mut i = start; + loop { + if i >= end { + break; + } + arr.append(i); + i += step; + }; + return arr.span(); +} + + +fn max, +Drop, +Copy, +PartialOrd,>(mut a: Span) -> T { + assert(a.len() > 0, 'span cannot be empty'); + + let mut max = *a.at(0); + loop { + match a.pop_front() { + Option::Some(v) => { if *v > max { + max = *v; + }; }, + Option::None => { break max; } + }; + } +} + + +fn average< + T, + MAG, + +NumberTrait, + +Into, + +AddEq, + +Drop, + +Copy, + +PartialOrd, + +Div +>( + mut a: Span +) -> T { + assert(a.len() > 0, 'span cannot be empty'); + + let mut sum = *a.at(0); + let n = NumberTrait::new_unscaled((a.len()).into(), false); + loop { + match a.pop_front() { + Option::Some(v) => { sum += *v; }, + Option::None => { break sum / n; } + }; + } +} + diff --git a/src/operators/nn/functional/gemm.cairo b/src/operators/nn/functional/gemm.cairo index e5b997731..b67754301 100644 --- a/src/operators/nn/functional/gemm.cairo +++ b/src/operators/nn/functional/gemm.cairo @@ -52,4 +52,4 @@ fn gemm< }, Option::None(_) => { return mul_by_scalar(@A.matmul(@B), alpha); } } -} \ No newline at end of file +} diff --git a/src/operators/nn/functional/max_pool.cairo b/src/operators/nn/functional/max_pool.cairo new file mode 100644 index 000000000..69e060a2b --- /dev/null +++ b/src/operators/nn/functional/max_pool.cairo @@ -0,0 +1,1084 @@ +use core::clone::Clone; +use core::option::OptionTrait; +use core::array::ArrayTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor, I32Tensor}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; 
+use orion::operators::tensor::core::{stride}; +use core::debug::PrintTrait; +use core::traits::Into; +use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; + +use orion::operators::nn::functional::common_pool::{common_pool}; +use orion::operators::nn::{AUTO_PAD, POOLING_TYPE}; + +/// Cf: NNTrait::max_pool docstring +fn max_pool< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Into, + +Rem, + +Neg, + +SubEq, +>( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, +) -> (Tensor, Option>) { + match dilations { + Option::Some(dilations) => { + if (min(dilations) != max(dilations) || min(dilations) != 1) { + max_pool_implementation( + X, + auto_pad, + ceil_mode, + Option::Some(dilations), + kernel_shape, + pads, + storage_order, + strides, + output_len, + ) + } else { + match strides { + Option::Some(strides) => { + if (min(strides) != max(strides) || min(strides) != 1) { + max_pool_implementation( + X, + auto_pad, + ceil_mode, + Option::Some(dilations), + kernel_shape, + pads, + storage_order, + Option::Some(strides), + output_len, + ) + } else { + common_pool( + POOLING_TYPE::MAX, + 0, + X, + auto_pad, + ceil_mode, + Option::Some(dilations), + kernel_shape, + pads, + Option::Some(strides), + 1, + ) + } + }, + Option::None => { + common_pool( + POOLING_TYPE::MAX, + 0, + X, + auto_pad, + ceil_mode, + Option::Some(dilations), + kernel_shape, + pads, + Option::None, + 1, + ) + }, + } + } + }, + Option::None => { + match strides { + Option::Some(strides) => { + if (min(strides) != max(strides) || min(strides) != 1) { + max_pool_implementation( + X, + auto_pad, + ceil_mode, + Option::None, + kernel_shape, + pads, + storage_order, + Option::Some(strides), + output_len, + ) + } else { + common_pool( + 
POOLING_TYPE::MAX, + 0, + X, + auto_pad, + ceil_mode, + Option::None, + kernel_shape, + pads, + Option::Some(strides), + 1, + ) + } + }, + Option::None => { + common_pool( + POOLING_TYPE::MAX, + 0, + X, + auto_pad, + ceil_mode, + Option::None, + kernel_shape, + pads, + Option::None, + 1, + ) + }, + } + } + } +} + + +fn max_pool_implementation< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Into, + +Rem, + +Neg, + +SubEq, +>( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, +) -> (Tensor, Option>) { + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + let n_dims = kernel_shape.len(); + + let pads = match pads { + Option::Some(pads) => pads, + Option::None => { + let mut pads = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + }; + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 0; + loop { + if i == n_dims { + break; + } + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + + let auto_pad = match auto_pad { + Option::Some(auto_pad) => auto_pad, + Option::None => AUTO_PAD::NOTSET, + }; + + let storage_order = match storage_order { + Option::Some(storage_order) => storage_order, + Option::None => 0, + }; + + let input_spatial_shape = SpanTrait::slice((*X).shape, 2, (*X).shape.len() - 2); + + let ceil_mode = match ceil_mode { + 
Option::Some(ceil_mode) => ceil_mode, + Option::None => 0, + }; + + let output_spatial_shape = if ceil_mode == 1 { + let mut output_spatial_shape = ArrayTrait::::new(); + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + let oss: T = NumberTrait::ceil( + (NumberTrait::new_unscaled( + (*input_spatial_shape.at(i) + *pads.at(i) + *pads.at(i + n_dims)).into(), false + ) + - NumberTrait::new_unscaled( + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1).into(), false + )) + / NumberTrait::new_unscaled((*strides.at(i)).into(), false) + + NumberTrait::one() + ); + + let need_to_reduce_out_size_in_ceil_mode = (oss.try_into().unwrap() - 1) + * *strides.at(i) >= *input_spatial_shape.at(i) + + *pads.at(i); + if need_to_reduce_out_size_in_ceil_mode { + output_spatial_shape.append(oss.try_into().unwrap() - 1); + } else { + output_spatial_shape.append(oss.try_into().unwrap()); + }; + i += 1; + }; + + output_spatial_shape.span() + } else { + let mut output_spatial_shape = ArrayTrait::::new(); + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + let oss: T = NumberTrait::floor( + (NumberTrait::new_unscaled( + (*input_spatial_shape.at(i) + *pads.at(i) + *pads.at(i + n_dims)).into(), false + ) + - NumberTrait::new_unscaled( + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1).into(), false + )) + / NumberTrait::new_unscaled((*strides.at(i)).into(), false) + + NumberTrait::one() + ); + output_spatial_shape.append(oss.try_into().unwrap()); + i += 1; + }; + output_spatial_shape.span() + }; + + let (pads, output_spatial_shape) = match auto_pad { + AUTO_PAD::NOTSET => { (pads, output_spatial_shape) }, + AUTO_PAD::SAME_UPPER => { + let mut output_spatial_shape = ArrayTrait::::new(); + let mut pad_1 = ArrayTrait::new(); + let mut pad_2 = ArrayTrait::new(); + let mut pads = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + let oss: T = NumberTrait::ceil( + 
NumberTrait::new_unscaled((*input_spatial_shape.at(i)).into(), false) + / NumberTrait::new_unscaled((*strides.at(i)).into(), false) + ); + output_spatial_shape.append(oss.try_into().unwrap()); + + let pad_i = (*output_spatial_shape[i] - 1) * *strides[i] + + ((*kernel_shape[i] - 1) * *dilations[i] + 1) + - *input_spatial_shape[i]; + + pad_1.append(pad_i / 2); + pad_2.append(pad_i - (pad_i / 2)); + + i += 1; + }; + + pads.append_span(pad_1.span()); + pads.append_span(pad_2.span()); + + (pads.span(), output_spatial_shape.span()) + }, + AUTO_PAD::SAME_LOWER => { + let mut output_spatial_shape = ArrayTrait::::new(); + let mut pad_1 = ArrayTrait::new(); + let mut pad_2 = ArrayTrait::new(); + let mut pads = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + + let oss: T = NumberTrait::floor( + NumberTrait::new_unscaled((*input_spatial_shape.at(i)).into(), false) + / NumberTrait::new_unscaled((*strides.at(i)).into(), false) + ); + output_spatial_shape.append(oss.try_into().unwrap()); + + let pad_i = (*output_spatial_shape[i] - 1) * *strides[i] + + ((*kernel_shape[i] - 1) * *dilations[i] + 1) + - *input_spatial_shape[i]; + + pad_1.append(pad_i / 2); + pad_2.append(pad_i - (pad_i / 2)); + + i += 1; + }; + + pads.append_span(pad_1.span()); + pads.append_span(pad_2.span()); + + (pads.span(), output_spatial_shape.span()) + }, + AUTO_PAD::VALID => { + let mut output_spatial_shape = ArrayTrait::::new(); + let mut i = 0; + loop { + if i == input_spatial_shape.len() { + break; + } + let oss: T = NumberTrait::ceil( + (NumberTrait::new_unscaled((*input_spatial_shape.at(i)).into(), false) + - NumberTrait::new_unscaled( + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1).into(), false + ) + + NumberTrait::one()) + / NumberTrait::new_unscaled((*strides.at(i)).into(), false) + ); + output_spatial_shape.append(oss.try_into().unwrap()); + + i += 1; + }; + + (pads, output_spatial_shape.span()) + }, + }; + + let nd = 
input_spatial_shape.len(); + if nd == 1 { + return max_pool_1d( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_spatial_shape, + output_len + ); + } + if nd == 2 { + return max_pool_2d( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_spatial_shape, + output_len + ); + } + if nd == 3 { + return max_pool_3d( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_spatial_shape, + output_len + ); + } + + return max_pool_nd( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_spatial_shape, + output_len + ); +} + + +fn max_pool_1d, +NumberTrait, +Copy, +Drop, +PartialOrd,>( + X: @Tensor, + auto_pad: AUTO_PAD, + ceil_mode: usize, + dilations: Span, + kernel_shape: Span, + pads: Span, + storage_order: usize, + strides: Span, + output_spatial_shape: Span, + output_len: usize, +) -> (Tensor, Option>) { + let mut y_dims = ArrayTrait::new(); + y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); + y_dims.append_span(output_spatial_shape); + + let N = *(*X).shape.at(0); + let C = *(*X).shape.at(1); + + let x_step = *(*X).shape.at(2); + let y_step = *y_dims.at(2); + + let total_channels = N * C; + + let stride_h = I32Number::new((*strides.at(0)).into(), false); + let dilation_h = I32Number::new((*dilations.at(0)).into(), false); + let ks_h = I32Number::new((*kernel_shape.at(0)).into(), false); + let pad_h = I32Number::new((*pads.at(0)).into(), false); + + let mut Y_data = ArrayTrait::new(); + let mut I_data = ArrayTrait::new(); + + let mut c = 0; + loop { + if c == total_channels { + break; + } + let x_d = c * x_step; + + let mut ph = 0; + loop { + if ph == y_step { + break; + } + let hstart = I32Number::new((ph).into(), false) * stride_h - pad_h; + let hend = hstart + ks_h * dilation_h; + + let mut h_index = I32Number::new(1, true); + let mut Yh: T = 
NumberTrait::min_value(); + + let mut h = hstart; + loop { + if h >= hend { + break; + } + if h >= 0 && h < x_step.into() { + if *(*X).data.at(x_d + h.into()) > Yh { + h_index = h.into(); + Yh = (*(*X).data.at(x_d + h.into())); + } + } + h += dilation_h; + }; + + Y_data.append(Yh); + I_data.append((c * x_step).into() + h_index); + + ph += 1; + }; + c += 1; + }; + if output_len == 1 { + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + } + return ( + TensorTrait::new(y_dims.span(), Y_data.span()), + Option::Some(TensorTrait::new(y_dims.span(), I_data.span())) + ); +} + +fn max_pool_2d< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +PartialOrd, + +PartialEq, + +PrintTrait +>( + X: @Tensor, + auto_pad: AUTO_PAD, + ceil_mode: usize, + dilations: Span, + kernel_shape: Span, + pads: Span, + storage_order: usize, + strides: Span, + output_spatial_shape: Span, + output_len: usize, +) -> (Tensor, Option>) { + let mut y_dims = ArrayTrait::new(); + y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); + y_dims.append_span(output_spatial_shape); + + let N = *(*X).shape.at(0); + let C = *(*X).shape.at(1); + let H = *(*X).shape.at(2); + let W = *(*X).shape.at(3); + + let pooled_H = *y_dims.at(2); + let pooled_W = *y_dims.at(3); + + let x_step = H * W; + + let total_channels = N * C; + + let stride_h = I32Number::new((*strides.at(0)).into(), false); + let stride_w = I32Number::new((*strides.at(1)).into(), false); + + let dilation_h = I32Number::new((*dilations.at(0)).into(), false); + let dilation_w = I32Number::new((*dilations.at(1)).into(), false); + + let ks_h = I32Number::new((*kernel_shape.at(0)).into(), false); + let ks_w = I32Number::new((*kernel_shape.at(1)).into(), false); + + let pad_h = I32Number::new((*pads.at(0)).into(), false); + let pad_w = I32Number::new((*pads.at(1)).into(), false); + + let mut Y_data = ArrayTrait::new(); + let mut I_data = ArrayTrait::new(); + + let X_len = (*X).data.len(); + + let mut c = 0; + loop 
{ + if c == total_channels { + break; + } + let x_d = c * x_step; + + let mut ph = 0; + loop { + if ph == pooled_H { + break; + } + let hstart = I32Number::new((ph).into(), false) * stride_h - pad_h; + let hend = hstart + ks_h * dilation_h; + + let mut pw = 0; + loop { + if pw == pooled_W { + break; + } + let wstart = I32Number::new((pw).into(), false) * stride_w - pad_w; + let wend = wstart + ks_w * dilation_w; + + let mut h_index = I32Number::new(1, true); + let mut w_index = I32Number::new(1, true); + + let mut Yh: T = NumberTrait::min_value(); + + let mut h = hstart; + loop { + if h >= hend { + break; + } + if h >= 0 && h < H.into() { + let mut w = wstart; + loop { + if w >= wend { + break; + } + if w >= 0 && w < W.into() { + let input_index = h * W.into() + w; + if input_index >= 0 && input_index < X_len.into() { + if *(*X).data.at(x_d + input_index.into()) > Yh { + h_index = h.into(); + w_index = w.into(); + Yh = (*(*X).data.at(x_d + input_index.into())); + } + } + } + w += dilation_w; + }; + }; + h += dilation_h; + }; + + if Yh != NumberTrait::::min_value() { + Y_data.append(Yh); + if storage_order == 0 { + I_data.append((c * x_step).into() + h_index * W.into() + w_index); + } else { + I_data.append((c * x_step).into() + h_index + w_index * H.into()); + } + } + pw += 1; + }; + ph += 1; + }; + c += 1; + }; + + if output_len == 1 { + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + } + return ( + TensorTrait::new(y_dims.span(), Y_data.span()), + Option::Some(TensorTrait::new(y_dims.span(), I_data.span())) + ); +} + +fn max_pool_3d< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +PartialOrd, + +PartialEq, + +PrintTrait +>( + X: @Tensor, + auto_pad: AUTO_PAD, + ceil_mode: usize, + dilations: Span, + kernel_shape: Span, + pads: Span, + storage_order: usize, + strides: Span, + output_spatial_shape: Span, + output_len: usize, +) -> (Tensor, Option>) { + let mut y_dims = ArrayTrait::new(); + 
y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); + y_dims.append_span(output_spatial_shape); + + let N = *(*X).shape.at(0); + let C = *(*X).shape.at(1); + let H = *(*X).shape.at(2); + let W = *(*X).shape.at(3); + let D = *(*X).shape.at(4); + + let pooled_H = *y_dims.at(2); + let pooled_W = *y_dims.at(3); + let pooled_D = *y_dims.at(4); + + let x_step = H * W * D; + + let total_channels = N * C; + + let stride_h = I32Number::new((*strides.at(0)).into(), false); + let stride_w = I32Number::new((*strides.at(1)).into(), false); + let stride_d = I32Number::new((*strides.at(2)).into(), false); + + let dilation_h = I32Number::new((*dilations.at(0)).into(), false); + let dilation_w = I32Number::new((*dilations.at(1)).into(), false); + let dilation_d = I32Number::new((*dilations.at(2)).into(), false); + + let ks_h = I32Number::new((*kernel_shape.at(0)).into(), false); + let ks_w = I32Number::new((*kernel_shape.at(1)).into(), false); + let ks_d = I32Number::new((*kernel_shape.at(2)).into(), false); + + let pad_h = I32Number::new((*pads.at(0)).into(), false); + let pad_w = I32Number::new((*pads.at(1)).into(), false); + let pad_d = I32Number::new((*pads.at(2)).into(), false); + + let mut Y_data = ArrayTrait::new(); + let mut I_data = ArrayTrait::new(); + + let X_len = (*X).data.len(); + + let mut c = 0; + loop { + if c == total_channels { + break; + } + let x_d = c * x_step; + + let mut ph = 0; + loop { + if ph == pooled_H { + break; + } + let hstart = I32Number::new((ph).into(), false) * stride_h - pad_h; + let hend = hstart + ks_h * dilation_h; + + let mut pw = 0; + loop { + if pw == pooled_W { + break; + } + let wstart = I32Number::new((pw).into(), false) * stride_w - pad_w; + let wend = wstart + ks_w * dilation_w; + + let mut pd = 0; + loop { + if pd == pooled_D { + break; + } + let dstart = I32Number::new((pd).into(), false) * stride_d - pad_d; + let dend = dstart + ks_d * dilation_d; + + let mut h_index = I32Number::new(1, true); + let mut w_index = 
I32Number::new(1, true); + let mut d_index = I32Number::new(1, true); + + let mut Yh: T = NumberTrait::min_value(); + + let mut h = hstart; + let mut Yh = loop { + if h >= hend { + break Yh; + } + if h >= 0 && h < H.into() { + let mut w = wstart; + loop { + if w >= wend { + break Yh; + } + if w >= 0 && w < W.into() { + let mut d = dstart; + loop { + if d >= dend { + break; + } + if d >= 0 && d < D.into() { + let input_index = h * W.into() * D.into() + + w * D.into() + + d; + if input_index >= 0 && input_index < X_len.into() { + if *(*X).data.at(x_d + input_index.into()) > Yh { + h_index = h.into(); + w_index = w.into(); + d_index = d.into(); + Yh = (*(*X).data.at(x_d + input_index.into())); + } + } + } + d += dilation_d; + }; + }; + w += dilation_w; + }; + }; + h += dilation_h; + }; + Y_data.append(Yh); + + if storage_order == 0 { + I_data + .append( + (c * x_step).into() + + h_index * W.into() * D.into() + + w_index * D.into() + + d_index + ); + } else { + I_data + .append( + (c * x_step).into() + + h_index + + w_index * H.into() + + d_index * H.into() * W.into() + ); + } + pd += 1; + }; + pw += 1; + }; + ph += 1; + }; + c += 1; + }; + + if output_len == 1 { + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + } + return ( + TensorTrait::new(y_dims.span(), Y_data.span()), + Option::Some(TensorTrait::new(y_dims.span(), I_data.span())) + ); +} + + +fn max_pool_nd< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +PartialOrd, + +PartialEq, + +PrintTrait, + +TryInto, + +Into, + +Div +>( + X: @Tensor, + auto_pad: AUTO_PAD, + ceil_mode: usize, + dilations: Span, + kernel_shape: Span, + pads: Span, + storage_order: usize, + strides: Span, + output_spatial_shape: Span, + output_len: usize, +) -> (Tensor, Option>) { + let nd = (*X).shape.len() - 2; + + let mut y_dims = ArrayTrait::new(); + y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); + y_dims.append_span(output_spatial_shape); + + let N = *(*X).shape.at(0); + let C = 
*(*X).shape.at(1); + + let x_stride = stride((*X).shape); + let y_stride = stride(y_dims.span()); + + let i_stride_storage_order_1 = if storage_order == 1 { + let i_stride_storage_order_1 = reverse_stride(SpanTrait::slice((*X).shape, 2, nd)); + i_stride_storage_order_1 + } else { + array![].span() + }; + + let x_step = *x_stride.at(1); + let y_step = *y_stride.at(1); + + let total_channels = N * C; + + let stride_n: Span = u32_span_into_i32_span(strides); + let dilation_n: Span = u32_span_into_i32_span(dilations); + let ks_n: Span = u32_span_into_i32_span(kernel_shape); + let pad_n: Span = u32_span_into_i32_span(pads); + + let mut Y_data = ArrayTrait::new(); + let mut I_data = ArrayTrait::new(); + + let X_len = (*X).data.len(); + + let mut c = 0; + loop { + if c == total_channels { + break; + } + let x_d = c * x_step; + + let mut p = 0; + loop { + if p == y_step { + break; + } + + let mut flatten_index = p; + + let mut nstart = ArrayTrait::new(); + let mut nend = ArrayTrait::new(); + let mut nstep = ArrayTrait::::new(); + + let mut n = 0; + loop { + if n == nd { + break; + } + let (pn, rem) = DivRem::div_rem( + flatten_index, (*y_stride.at(2 + n)).try_into().unwrap() + ); + flatten_index = rem; + + let ns = pn.into() * *stride_n.at(n) - *pad_n.at(n); + nstart.append(ns); + nend.append(ns + *ks_n.at(n) * *dilation_n.at(n)); + + let a: T = NumberTrait::new_unscaled(((*nend.at(n) - ns)).into(), false); + let b: T = NumberTrait::new_unscaled((*dilation_n.at(n)).into(), false); + nstep.append(NumberTrait::ceil(a / b).try_into().unwrap()); + n += 1; + }; + + let nstart = nstart.span(); + let nstride = stride(nstep.span()); + let max_iter = *nstep.at(0) * *nstride.at(0); + + let mut n_index = array![I32Number::new(1, true)].span(); + + let mut Yh: T = NumberTrait::min_value(); + + let mut i = 0; + let Yh = loop { + if i == max_iter { + break Yh; + } + let mut flatten_index = i; + let mut is_outside = false; + let mut i_index = ArrayTrait::new(); + let mut input_index = 
I32Number::zero(); + + let mut n = 0; + loop { + if n == nd { + break Yh; + } + let (item, rem) = DivRem::div_rem( + flatten_index, (*nstride.at(n)).try_into().unwrap() + ); + flatten_index = rem; + + let item_ = item.into() * *dilation_n.at(n) + *nstart.at(n); + if item_ < 0 || item_ >= (*(*X).shape.at(2 + n)).into() { + is_outside = true; + }; + i_index.append(item_); + input_index += item_ * (*x_stride.at(2 + n)).into(); + + n += 1; + }; + + if !is_outside { + if input_index >= 0 && input_index < X_len.into() { + if *(*X).data.at(x_d + input_index.into()) > Yh { + n_index = i_index.span().clone(); + Yh = (*(*X).data.at(x_d + input_index.into())); + }; + }; + }; + i += 1; + }; + Y_data.append(Yh); + + if storage_order == 0 { + let mut index = 0; + let mut n = 0; + loop { + if n == nd { + break; + } + index += *n_index.at(n) * (*x_stride.at(2 + n)).into(); + n += 1; + }; + I_data.append((c * x_step).into() + index); + } else { + let mut index = 0; + let mut n = nd; + loop { + if n == 0 { + break; + } + index += *n_index.at(n - 1) * (*i_stride_storage_order_1.at(nd - n)).into(); + n -= 1; + }; + I_data.append((c * x_step).into() + index); + } + p += 1; + }; + c += 1; + }; + if output_len == 1 { + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + } + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); +} + + +fn u32_span_into_i32_span(mut x: Span) -> Span { + let mut res = ArrayTrait::new(); + + loop { + match x.pop_front() { + Option::Some(v) => { res.append((*v).into()); }, + Option::None => { break res.span(); } + }; + } +} + +fn reverse_stride(mut a: Span) -> Span { + let mut prod = 1; + let mut arr = ArrayTrait::new(); + loop { + match a.pop_front() { + Option::Some(v) => { + prod *= *v; + arr.append(prod); + }, + Option::None => { break arr.span(); } + }; + } +} + + +fn min(mut a: Span) -> usize { + assert(a.len() > 0, 'span cannot be empty'); + + let mut min = *a.at(0); + loop { + match a.pop_front() { + 
Option::Some(v) => { if *v < min { + min = *v; + }; }, + Option::None => { break min; } + }; + } +} + + +fn max(mut a: Span) -> usize { + assert(a.len() > 0, 'span cannot be empty'); + + let mut max = *a.at(0); + loop { + match a.pop_front() { + Option::Some(v) => { if *v > max { + max = *v; + }; }, + Option::None => { break max; } + }; + } +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index edf7b59c7..aa6a7b6ce 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -13,6 +13,9 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ use orion::operators::tensor::implementations::tensor_fp16x16wide::{ FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd }; +use orion::numbers::I32IntoU32; +use orion::operators::tensor::implementations::tensor_i32::I32Tensor; +use orion::operators::nn::AUTO_PAD; impl FP16x16NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -145,4 +148,28 @@ impl FP16x16NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + functional::max_pool::max_pool( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_len + ) + } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index db99af01b..3054dd1ad 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -7,6 +7,9 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x use orion::operators::tensor::implementations::tensor_fp32x32::{ FP32x32Tensor, 
FP32x32TensorDiv, FP32x32TensorAdd }; +use orion::numbers::I32IntoU32; +use orion::operators::tensor::implementations::tensor_i32::I32Tensor; +use orion::operators::nn::AUTO_PAD; impl FP32x32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -139,4 +142,19 @@ impl FP32x32NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len) + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index 935af584c..a378cfb70 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -7,6 +7,9 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x use orion::operators::tensor::implementations::tensor_fp64x64::{ FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd }; +//use orion::numbers::I32IntoU64; +use orion::operators::tensor::implementations::tensor_i32::I32Tensor; +use orion::operators::nn::AUTO_PAD; impl FP64x64NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -139,4 +142,19 @@ impl FP64x64NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len) + panic(array!['not 
supported!']) + } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 842c115f9..add955fd9 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -11,6 +11,9 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W }; use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor}; +use orion::numbers::I32IntoU32; +use orion::operators::tensor::implementations::tensor_i32::I32Tensor; +use orion::operators::nn::AUTO_PAD; impl FP8x23NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -141,4 +144,28 @@ impl FP8x23NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + functional::max_pool::max_pool( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_len + ) + } } diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 13a670746..0156fc5f5 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -4,6 +4,7 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd}; +use orion::operators::nn::AUTO_PAD; impl I32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -132,4 +133,18 @@ impl I32NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + 
auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index 5d359a91c..284dd5ee1 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -4,6 +4,8 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::implementations::tensor_i32::I32Tensor; +use orion::operators::nn::AUTO_PAD; impl I8NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -132,4 +134,18 @@ impl I8NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 9e4038fe4..1ebfb3bec 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -4,6 +4,8 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::implementations::tensor_i32::I32Tensor; +use orion::operators::nn::AUTO_PAD; impl U32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -132,4 +134,18 @@ impl U32NN of NNTrait { ) -> Tensor { 
functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + panic(array!['not supported!']) + } } diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..250436c9f 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -5,3 +5,5 @@ mod nodes; mod ml; mod operators; + + diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 8814cfb80..2133d4f46 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1039,3 +1039,16 @@ mod conv_2D_with_autopad_same; mod conv_2D_with_strides_asymmetric_padding; mod conv_2D_with_strides_with_padding; mod conv_4D_with_padding; +mod maxpool_2d; +mod maxpool_1d; +mod maxpool_1d_default; +mod maxpool_2d_ceil; +mod maxpool_2d_constraint_index; +mod maxpool_2d_default; +mod maxpool_2d_dilations; +mod maxpool_2d_pads_default; +mod maxpool_2d_same_lower_default; +mod maxpool_2d_same_upper; +mod maxpool_2d_same_upper_default; +mod maxpool_3d_dilations; +mod maxpool_4d_dilations; diff --git a/tests/nodes/maxpool_1d.cairo b/tests/nodes/maxpool_1d.cairo new file mode 100644 index 000000000..e9ef475f8 --- /dev/null +++ b/tests/nodes/maxpool_1d.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_1d() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::None, + array![2].span(), + Option::None, + Option::None, + Option::Some(array![2].span()), + 1 + ); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/maxpool_1d/input_0.cairo b/tests/nodes/maxpool_1d/input_0.cairo new file mode 100644 index 000000000..3e92bf1d2 --- /dev/null +++ b/tests/nodes/maxpool_1d/input_0.cairo @@ -0,0 +1,110 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(32); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 61884, sign: true }); + data.append(FP16x16 { mag: 66202, sign: true }); + data.append(FP16x16 { mag: 63102, sign: false }); + data.append(FP16x16 { mag: 70834, sign: false }); + data.append(FP16x16 { mag: 84277, sign: true }); + data.append(FP16x16 { mag: 15414, sign: true }); + data.append(FP16x16 { mag: 69938, sign: true }); + data.append(FP16x16 { mag: 63283, sign: true }); + data.append(FP16x16 { mag: 43864, sign: false }); + data.append(FP16x16 { mag: 80777, sign: false }); + data.append(FP16x16 { mag: 62331, sign: false }); + data.append(FP16x16 { mag: 67865, sign: true }); + data.append(FP16x16 { mag: 24630, sign: true }); + data.append(FP16x16 { mag: 36245, sign: false }); + data.append(FP16x16 { mag: 134644, sign: true }); + data.append(FP16x16 { mag: 43793, sign: false }); + data.append(FP16x16 { mag: 86411, sign: false }); + data.append(FP16x16 { mag: 86146, sign: true }); + data.append(FP16x16 { mag: 16382, sign: true }); + data.append(FP16x16 { mag: 146017, sign: false }); + data.append(FP16x16 { mag: 7647, sign: true }); + data.append(FP16x16 { mag: 45163, sign: true }); + data.append(FP16x16 { mag: 104406, sign: false }); + data.append(FP16x16 { mag: 45462, sign: false }); + data.append(FP16x16 { mag: 86222, sign: false }); + data.append(FP16x16 { mag: 9912, sign: true }); + data.append(FP16x16 { mag: 22960, sign: true }); + data.append(FP16x16 { mag: 55123, 
sign: true }); + data.append(FP16x16 { mag: 124655, sign: true }); + data.append(FP16x16 { mag: 31465, sign: false }); + data.append(FP16x16 { mag: 61922, sign: false }); + data.append(FP16x16 { mag: 163238, sign: true }); + data.append(FP16x16 { mag: 34228, sign: true }); + data.append(FP16x16 { mag: 4475, sign: false }); + data.append(FP16x16 { mag: 56673, sign: true }); + data.append(FP16x16 { mag: 90552, sign: true }); + data.append(FP16x16 { mag: 66213, sign: false }); + data.append(FP16x16 { mag: 214831, sign: true }); + data.append(FP16x16 { mag: 77997, sign: true }); + data.append(FP16x16 { mag: 704, sign: true }); + data.append(FP16x16 { mag: 33211, sign: true }); + data.append(FP16x16 { mag: 12139, sign: true }); + data.append(FP16x16 { mag: 18185, sign: false }); + data.append(FP16x16 { mag: 90981, sign: true }); + data.append(FP16x16 { mag: 37230, sign: false }); + data.append(FP16x16 { mag: 15860, sign: true }); + data.append(FP16x16 { mag: 38407, sign: true }); + data.append(FP16x16 { mag: 16248, sign: false }); + data.append(FP16x16 { mag: 109129, sign: false }); + data.append(FP16x16 { mag: 52730, sign: true }); + data.append(FP16x16 { mag: 48858, sign: false }); + data.append(FP16x16 { mag: 72554, sign: false }); + data.append(FP16x16 { mag: 89600, sign: false }); + data.append(FP16x16 { mag: 61833, sign: true }); + data.append(FP16x16 { mag: 9863, sign: false }); + data.append(FP16x16 { mag: 2754, sign: false }); + data.append(FP16x16 { mag: 85035, sign: false }); + data.append(FP16x16 { mag: 47440, sign: true }); + data.append(FP16x16 { mag: 176235, sign: false }); + data.append(FP16x16 { mag: 77741, sign: false }); + data.append(FP16x16 { mag: 18683, sign: true }); + data.append(FP16x16 { mag: 8069, sign: true }); + data.append(FP16x16 { mag: 30891, sign: true }); + data.append(FP16x16 { mag: 26682, sign: true }); + data.append(FP16x16 { mag: 32658, sign: true }); + data.append(FP16x16 { mag: 1956, sign: true }); + data.append(FP16x16 { mag: 
96803, sign: false }); + data.append(FP16x16 { mag: 61321, sign: false }); + data.append(FP16x16 { mag: 33065, sign: true }); + data.append(FP16x16 { mag: 59893, sign: false }); + data.append(FP16x16 { mag: 157662, sign: false }); + data.append(FP16x16 { mag: 52334, sign: true }); + data.append(FP16x16 { mag: 46043, sign: true }); + data.append(FP16x16 { mag: 152484, sign: false }); + data.append(FP16x16 { mag: 27460, sign: false }); + data.append(FP16x16 { mag: 1553, sign: false }); + data.append(FP16x16 { mag: 29415, sign: true }); + data.append(FP16x16 { mag: 26375, sign: false }); + data.append(FP16x16 { mag: 21889, sign: false }); + data.append(FP16x16 { mag: 80932, sign: true }); + data.append(FP16x16 { mag: 2233, sign: false }); + data.append(FP16x16 { mag: 42479, sign: false }); + data.append(FP16x16 { mag: 1156, sign: false }); + data.append(FP16x16 { mag: 158107, sign: false }); + data.append(FP16x16 { mag: 34271, sign: false }); + data.append(FP16x16 { mag: 86694, sign: true }); + data.append(FP16x16 { mag: 89544, sign: true }); + data.append(FP16x16 { mag: 29105, sign: true }); + data.append(FP16x16 { mag: 21337, sign: false }); + data.append(FP16x16 { mag: 12441, sign: true }); + data.append(FP16x16 { mag: 24034, sign: true }); + data.append(FP16x16 { mag: 24040, sign: true }); + data.append(FP16x16 { mag: 73971, sign: true }); + data.append(FP16x16 { mag: 700, sign: false }); + data.append(FP16x16 { mag: 99358, sign: true }); + data.append(FP16x16 { mag: 109591, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_1d/output_0.cairo b/tests/nodes/maxpool_1d/output_0.cairo new file mode 100644 index 000000000..97ac3b8de --- /dev/null +++ b/tests/nodes/maxpool_1d/output_0.cairo @@ -0,0 +1,62 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn 
output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(16); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 61884, sign: true }); + data.append(FP16x16 { mag: 70834, sign: false }); + data.append(FP16x16 { mag: 15414, sign: true }); + data.append(FP16x16 { mag: 63283, sign: true }); + data.append(FP16x16 { mag: 80777, sign: false }); + data.append(FP16x16 { mag: 62331, sign: false }); + data.append(FP16x16 { mag: 36245, sign: false }); + data.append(FP16x16 { mag: 43793, sign: false }); + data.append(FP16x16 { mag: 86411, sign: false }); + data.append(FP16x16 { mag: 146017, sign: false }); + data.append(FP16x16 { mag: 7647, sign: true }); + data.append(FP16x16 { mag: 104406, sign: false }); + data.append(FP16x16 { mag: 86222, sign: false }); + data.append(FP16x16 { mag: 22960, sign: true }); + data.append(FP16x16 { mag: 31465, sign: false }); + data.append(FP16x16 { mag: 61922, sign: false }); + data.append(FP16x16 { mag: 4475, sign: false }); + data.append(FP16x16 { mag: 56673, sign: true }); + data.append(FP16x16 { mag: 66213, sign: false }); + data.append(FP16x16 { mag: 704, sign: true }); + data.append(FP16x16 { mag: 12139, sign: true }); + data.append(FP16x16 { mag: 18185, sign: false }); + data.append(FP16x16 { mag: 37230, sign: false }); + data.append(FP16x16 { mag: 16248, sign: false }); + data.append(FP16x16 { mag: 109129, sign: false }); + data.append(FP16x16 { mag: 72554, sign: false }); + data.append(FP16x16 { mag: 89600, sign: false }); + data.append(FP16x16 { mag: 9863, sign: false }); + data.append(FP16x16 { mag: 85035, sign: false }); + data.append(FP16x16 { mag: 176235, sign: false }); + data.append(FP16x16 { mag: 8069, sign: true }); + data.append(FP16x16 { mag: 26682, sign: true }); + data.append(FP16x16 { mag: 1956, sign: true }); + data.append(FP16x16 { mag: 96803, sign: false }); + data.append(FP16x16 { mag: 59893, sign: false }); + data.append(FP16x16 { mag: 
157662, sign: false }); + data.append(FP16x16 { mag: 152484, sign: false }); + data.append(FP16x16 { mag: 27460, sign: false }); + data.append(FP16x16 { mag: 26375, sign: false }); + data.append(FP16x16 { mag: 21889, sign: false }); + data.append(FP16x16 { mag: 42479, sign: false }); + data.append(FP16x16 { mag: 158107, sign: false }); + data.append(FP16x16 { mag: 34271, sign: false }); + data.append(FP16x16 { mag: 29105, sign: true }); + data.append(FP16x16 { mag: 21337, sign: false }); + data.append(FP16x16 { mag: 24034, sign: true }); + data.append(FP16x16 { mag: 700, sign: false }); + data.append(FP16x16 { mag: 109591, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_1d_default.cairo b/tests/nodes/maxpool_1d_default.cairo new file mode 100644 index 000000000..d036694fa --- /dev/null +++ b/tests/nodes/maxpool_1d_default.cairo @@ -0,0 +1,31 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_1d_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::None, + array![2].span(), + Option::None, + Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} + diff --git a/tests/nodes/maxpool_1d_default/input_0.cairo b/tests/nodes/maxpool_1d_default/input_0.cairo new file mode 100644 index 000000000..f9522826d --- /dev/null +++ b/tests/nodes/maxpool_1d_default/input_0.cairo @@ -0,0 +1,110 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = 
ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(32); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 50224, sign: true }); + data.append(FP16x16 { mag: 2534, sign: false }); + data.append(FP16x16 { mag: 1978, sign: false }); + data.append(FP16x16 { mag: 53529, sign: false }); + data.append(FP16x16 { mag: 171944, sign: true }); + data.append(FP16x16 { mag: 47778, sign: false }); + data.append(FP16x16 { mag: 85540, sign: true }); + data.append(FP16x16 { mag: 3998, sign: false }); + data.append(FP16x16 { mag: 5615, sign: false }); + data.append(FP16x16 { mag: 1394, sign: true }); + data.append(FP16x16 { mag: 74940, sign: false }); + data.append(FP16x16 { mag: 32499, sign: false }); + data.append(FP16x16 { mag: 13610, sign: false }); + data.append(FP16x16 { mag: 147171, sign: false }); + data.append(FP16x16 { mag: 4356, sign: true }); + data.append(FP16x16 { mag: 90349, sign: true }); + data.append(FP16x16 { mag: 96528, sign: false }); + data.append(FP16x16 { mag: 108927, sign: true }); + data.append(FP16x16 { mag: 10457, sign: true }); + data.append(FP16x16 { mag: 2548, sign: false }); + data.append(FP16x16 { mag: 48359, sign: false }); + data.append(FP16x16 { mag: 25137, sign: false }); + data.append(FP16x16 { mag: 31065, sign: false }); + data.append(FP16x16 { mag: 83420, sign: false }); + data.append(FP16x16 { mag: 58282, sign: false }); + data.append(FP16x16 { mag: 71330, sign: false }); + data.append(FP16x16 { mag: 14944, sign: false }); + data.append(FP16x16 { mag: 95778, sign: true }); + data.append(FP16x16 { mag: 52231, sign: true }); + data.append(FP16x16 { mag: 1629, sign: false }); + data.append(FP16x16 { mag: 86604, sign: false }); + data.append(FP16x16 { mag: 24073, sign: false }); + data.append(FP16x16 { mag: 54993, sign: true }); + data.append(FP16x16 { mag: 87393, sign: true }); + data.append(FP16x16 { mag: 83491, sign: true }); + data.append(FP16x16 { mag: 11108, sign: false }); + data.append(FP16x16 { mag: 
118783, sign: true }); + data.append(FP16x16 { mag: 119405, sign: true }); + data.append(FP16x16 { mag: 66301, sign: false }); + data.append(FP16x16 { mag: 128037, sign: false }); + data.append(FP16x16 { mag: 2385, sign: true }); + data.append(FP16x16 { mag: 31954, sign: true }); + data.append(FP16x16 { mag: 30235, sign: false }); + data.append(FP16x16 { mag: 34919, sign: false }); + data.append(FP16x16 { mag: 69026, sign: false }); + data.append(FP16x16 { mag: 25820, sign: true }); + data.append(FP16x16 { mag: 80142, sign: false }); + data.append(FP16x16 { mag: 71641, sign: false }); + data.append(FP16x16 { mag: 72810, sign: true }); + data.append(FP16x16 { mag: 23490, sign: false }); + data.append(FP16x16 { mag: 17323, sign: true }); + data.append(FP16x16 { mag: 24532, sign: true }); + data.append(FP16x16 { mag: 66044, sign: false }); + data.append(FP16x16 { mag: 44213, sign: true }); + data.append(FP16x16 { mag: 8164, sign: true }); + data.append(FP16x16 { mag: 32326, sign: false }); + data.append(FP16x16 { mag: 43120, sign: true }); + data.append(FP16x16 { mag: 181, sign: true }); + data.append(FP16x16 { mag: 18666, sign: true }); + data.append(FP16x16 { mag: 8560, sign: true }); + data.append(FP16x16 { mag: 15235, sign: true }); + data.append(FP16x16 { mag: 25524, sign: false }); + data.append(FP16x16 { mag: 97926, sign: true }); + data.append(FP16x16 { mag: 83401, sign: true }); + data.append(FP16x16 { mag: 10862, sign: false }); + data.append(FP16x16 { mag: 13170, sign: true }); + data.append(FP16x16 { mag: 14320, sign: false }); + data.append(FP16x16 { mag: 82805, sign: false }); + data.append(FP16x16 { mag: 11320, sign: false }); + data.append(FP16x16 { mag: 36914, sign: false }); + data.append(FP16x16 { mag: 476, sign: false }); + data.append(FP16x16 { mag: 26739, sign: true }); + data.append(FP16x16 { mag: 27204, sign: false }); + data.append(FP16x16 { mag: 135386, sign: true }); + data.append(FP16x16 { mag: 179608, sign: false }); + data.append(FP16x16 
{ mag: 38394, sign: true }); + data.append(FP16x16 { mag: 124283, sign: false }); + data.append(FP16x16 { mag: 17926, sign: false }); + data.append(FP16x16 { mag: 199614, sign: false }); + data.append(FP16x16 { mag: 21143, sign: false }); + data.append(FP16x16 { mag: 58284, sign: false }); + data.append(FP16x16 { mag: 44246, sign: true }); + data.append(FP16x16 { mag: 58693, sign: false }); + data.append(FP16x16 { mag: 39360, sign: false }); + data.append(FP16x16 { mag: 79614, sign: true }); + data.append(FP16x16 { mag: 36430, sign: false }); + data.append(FP16x16 { mag: 19447, sign: true }); + data.append(FP16x16 { mag: 10755, sign: false }); + data.append(FP16x16 { mag: 3572, sign: true }); + data.append(FP16x16 { mag: 23011, sign: true }); + data.append(FP16x16 { mag: 12359, sign: false }); + data.append(FP16x16 { mag: 33072, sign: true }); + data.append(FP16x16 { mag: 80505, sign: true }); + data.append(FP16x16 { mag: 25351, sign: false }); + data.append(FP16x16 { mag: 84321, sign: true }); + data.append(FP16x16 { mag: 39865, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_1d_default/output_0.cairo b/tests/nodes/maxpool_1d_default/output_0.cairo new file mode 100644 index 000000000..94eee9956 --- /dev/null +++ b/tests/nodes/maxpool_1d_default/output_0.cairo @@ -0,0 +1,107 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(31); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2534, sign: false }); + data.append(FP16x16 { mag: 2534, sign: false }); + data.append(FP16x16 { mag: 53529, sign: false }); + data.append(FP16x16 { mag: 53529, sign: false }); + data.append(FP16x16 { mag: 47778, sign: false }); + 
data.append(FP16x16 { mag: 47778, sign: false }); + data.append(FP16x16 { mag: 3998, sign: false }); + data.append(FP16x16 { mag: 5615, sign: false }); + data.append(FP16x16 { mag: 5615, sign: false }); + data.append(FP16x16 { mag: 74940, sign: false }); + data.append(FP16x16 { mag: 74940, sign: false }); + data.append(FP16x16 { mag: 32499, sign: false }); + data.append(FP16x16 { mag: 147171, sign: false }); + data.append(FP16x16 { mag: 147171, sign: false }); + data.append(FP16x16 { mag: 4356, sign: true }); + data.append(FP16x16 { mag: 96528, sign: false }); + data.append(FP16x16 { mag: 96528, sign: false }); + data.append(FP16x16 { mag: 10457, sign: true }); + data.append(FP16x16 { mag: 2548, sign: false }); + data.append(FP16x16 { mag: 48359, sign: false }); + data.append(FP16x16 { mag: 48359, sign: false }); + data.append(FP16x16 { mag: 31065, sign: false }); + data.append(FP16x16 { mag: 83420, sign: false }); + data.append(FP16x16 { mag: 83420, sign: false }); + data.append(FP16x16 { mag: 71330, sign: false }); + data.append(FP16x16 { mag: 71330, sign: false }); + data.append(FP16x16 { mag: 14944, sign: false }); + data.append(FP16x16 { mag: 52231, sign: true }); + data.append(FP16x16 { mag: 1629, sign: false }); + data.append(FP16x16 { mag: 86604, sign: false }); + data.append(FP16x16 { mag: 86604, sign: false }); + data.append(FP16x16 { mag: 54993, sign: true }); + data.append(FP16x16 { mag: 83491, sign: true }); + data.append(FP16x16 { mag: 11108, sign: false }); + data.append(FP16x16 { mag: 11108, sign: false }); + data.append(FP16x16 { mag: 118783, sign: true }); + data.append(FP16x16 { mag: 66301, sign: false }); + data.append(FP16x16 { mag: 128037, sign: false }); + data.append(FP16x16 { mag: 128037, sign: false }); + data.append(FP16x16 { mag: 2385, sign: true }); + data.append(FP16x16 { mag: 30235, sign: false }); + data.append(FP16x16 { mag: 34919, sign: false }); + data.append(FP16x16 { mag: 69026, sign: false }); + data.append(FP16x16 { mag: 
69026, sign: false }); + data.append(FP16x16 { mag: 80142, sign: false }); + data.append(FP16x16 { mag: 80142, sign: false }); + data.append(FP16x16 { mag: 71641, sign: false }); + data.append(FP16x16 { mag: 23490, sign: false }); + data.append(FP16x16 { mag: 23490, sign: false }); + data.append(FP16x16 { mag: 17323, sign: true }); + data.append(FP16x16 { mag: 66044, sign: false }); + data.append(FP16x16 { mag: 66044, sign: false }); + data.append(FP16x16 { mag: 8164, sign: true }); + data.append(FP16x16 { mag: 32326, sign: false }); + data.append(FP16x16 { mag: 32326, sign: false }); + data.append(FP16x16 { mag: 181, sign: true }); + data.append(FP16x16 { mag: 181, sign: true }); + data.append(FP16x16 { mag: 8560, sign: true }); + data.append(FP16x16 { mag: 8560, sign: true }); + data.append(FP16x16 { mag: 25524, sign: false }); + data.append(FP16x16 { mag: 25524, sign: false }); + data.append(FP16x16 { mag: 83401, sign: true }); + data.append(FP16x16 { mag: 10862, sign: false }); + data.append(FP16x16 { mag: 14320, sign: false }); + data.append(FP16x16 { mag: 82805, sign: false }); + data.append(FP16x16 { mag: 82805, sign: false }); + data.append(FP16x16 { mag: 36914, sign: false }); + data.append(FP16x16 { mag: 36914, sign: false }); + data.append(FP16x16 { mag: 476, sign: false }); + data.append(FP16x16 { mag: 27204, sign: false }); + data.append(FP16x16 { mag: 27204, sign: false }); + data.append(FP16x16 { mag: 179608, sign: false }); + data.append(FP16x16 { mag: 179608, sign: false }); + data.append(FP16x16 { mag: 124283, sign: false }); + data.append(FP16x16 { mag: 124283, sign: false }); + data.append(FP16x16 { mag: 199614, sign: false }); + data.append(FP16x16 { mag: 199614, sign: false }); + data.append(FP16x16 { mag: 58284, sign: false }); + data.append(FP16x16 { mag: 58284, sign: false }); + data.append(FP16x16 { mag: 58693, sign: false }); + data.append(FP16x16 { mag: 58693, sign: false }); + data.append(FP16x16 { mag: 39360, sign: false }); + 
data.append(FP16x16 { mag: 36430, sign: false }); + data.append(FP16x16 { mag: 36430, sign: false }); + data.append(FP16x16 { mag: 10755, sign: false }); + data.append(FP16x16 { mag: 10755, sign: false }); + data.append(FP16x16 { mag: 3572, sign: true }); + data.append(FP16x16 { mag: 12359, sign: false }); + data.append(FP16x16 { mag: 12359, sign: false }); + data.append(FP16x16 { mag: 33072, sign: true }); + data.append(FP16x16 { mag: 25351, sign: false }); + data.append(FP16x16 { mag: 25351, sign: false }); + data.append(FP16x16 { mag: 39865, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d.cairo b/tests/nodes/maxpool_2d.cairo new file mode 100644 index 000000000..20e768eb7 --- /dev/null +++ b/tests/nodes/maxpool_2d.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::None, + array![2, 2].span(), + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d/input_0.cairo b/tests/nodes/maxpool_2d/input_0.cairo new file mode 100644 index 000000000..28d2d90c9 --- /dev/null +++ b/tests/nodes/maxpool_2d/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = 
ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d/output_0.cairo b/tests/nodes/maxpool_2d/output_0.cairo new file mode 100644 index 000000000..91ca1ea93 --- /dev/null +++ b/tests/nodes/maxpool_2d/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = 
ArrayTrait::new(); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_ceil.cairo b/tests/nodes/maxpool_2d_ceil.cairo new file mode 100644 index 000000000..24f84ea08 --- /dev/null +++ b/tests/nodes/maxpool_2d_ceil.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_ceil() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::Some(1), + Option::None, + array![3, 3].span(), + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_ceil/input_0.cairo b/tests/nodes/maxpool_2d_ceil/input_0.cairo new file mode 100644 index 000000000..29b195332 --- /dev/null +++ b/tests/nodes/maxpool_2d_ceil/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { 
mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_ceil/output_0.cairo b/tests/nodes/maxpool_2d_ceil/output_0.cairo new file mode 100644 index 000000000..af7270750 --- /dev/null +++ b/tests/nodes/maxpool_2d_ceil/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_constraint_index.cairo b/tests/nodes/maxpool_2d_constraint_index.cairo new file mode 100644 index 000000000..72f1294ad --- /dev/null +++ b/tests/nodes/maxpool_2d_constraint_index.cairo @@ -0,0 +1,25 @@ +//mod input_0; +//mod output_0; +// +// +//use orion::operators::nn::NNTrait; +//use orion::operators::tensor::U32TensorPartialEq; +//use orion::numbers::FixedTrait; +//use orion::operators::tensor::I32TensorPartialEq; +//use orion::utils::{assert_eq, assert_seq_eq}; 
+//use orion::operators::nn::FP16x16NN; +//use orion::operators::nn::U32NN; +// +// +//#[test] +//#[available_gas(2000000000)] +//fn test_maxpool_2d_constraint_index() { +// let input_0 = input_0::input_0(); +// let z_0 = output_0::output_0(); +// +// let (_, y_0) = NNTrait::max_pool(@input_0,Option::None,Option::None,Option::None,array![2, 2].span(),Option::None,Option::Some(1),Option::Some(array![2, 2].span()),1); +// +// assert_eq(y_0.unwrap(), z_0); +//} + + diff --git a/tests/nodes/maxpool_2d_constraint_index/input_0.cairo b/tests/nodes/maxpool_2d_constraint_index/input_0.cairo new file mode 100644 index 000000000..28d2d90c9 --- /dev/null +++ b/tests/nodes/maxpool_2d_constraint_index/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 
1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_constraint_index/output_0.cairo b/tests/nodes/maxpool_2d_constraint_index/output_0.cairo new file mode 100644 index 000000000..872d5499f --- /dev/null +++ b/tests/nodes/maxpool_2d_constraint_index/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(6); + data.append(16); + data.append(8); + data.append(18); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_default.cairo b/tests/nodes/maxpool_2d_default.cairo new file mode 100644 index 000000000..8ad969018 --- /dev/null +++ b/tests/nodes/maxpool_2d_default.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::None, + array![2, 2].span(), + Option::None, + 
Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_default/input_0.cairo b/tests/nodes/maxpool_2d_default/input_0.cairo new file mode 100644 index 000000000..1950ff197 --- /dev/null +++ b/tests/nodes/maxpool_2d_default/input_0.cairo @@ -0,0 +1,207 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(8); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 70171, sign: true }); + data.append(FP16x16 { mag: 66033, sign: true }); + data.append(FP16x16 { mag: 34368, sign: true }); + data.append(FP16x16 { mag: 85619, sign: true }); + data.append(FP16x16 { mag: 10533, sign: false }); + data.append(FP16x16 { mag: 6665, sign: true }); + data.append(FP16x16 { mag: 62906, sign: false }); + data.append(FP16x16 { mag: 4088, sign: true }); + data.append(FP16x16 { mag: 48834, sign: true }); + data.append(FP16x16 { mag: 77568, sign: false }); + data.append(FP16x16 { mag: 72118, sign: false }); + data.append(FP16x16 { mag: 63744, sign: true }); + data.append(FP16x16 { mag: 97321, sign: false }); + data.append(FP16x16 { mag: 51751, sign: false }); + data.append(FP16x16 { mag: 49419, sign: true }); + data.append(FP16x16 { mag: 1074, sign: true }); + data.append(FP16x16 { mag: 33390, sign: true }); + data.append(FP16x16 { mag: 49420, sign: false }); + data.append(FP16x16 { mag: 76230, sign: true }); + data.append(FP16x16 { mag: 90932, sign: true }); + data.append(FP16x16 { mag: 51418, sign: true }); + data.append(FP16x16 { mag: 4380, sign: true }); + data.append(FP16x16 { mag: 160417, sign: false }); + data.append(FP16x16 { mag: 93830, sign: true }); + data.append(FP16x16 { mag: 49619, sign: true }); + data.append(FP16x16 { mag: 
24892, sign: false }); + data.append(FP16x16 { mag: 60389, sign: false }); + data.append(FP16x16 { mag: 82115, sign: false }); + data.append(FP16x16 { mag: 27484, sign: true }); + data.append(FP16x16 { mag: 9373, sign: false }); + data.append(FP16x16 { mag: 13065, sign: true }); + data.append(FP16x16 { mag: 3608, sign: false }); + data.append(FP16x16 { mag: 12179, sign: false }); + data.append(FP16x16 { mag: 47764, sign: false }); + data.append(FP16x16 { mag: 45483, sign: true }); + data.append(FP16x16 { mag: 73567, sign: false }); + data.append(FP16x16 { mag: 108526, sign: false }); + data.append(FP16x16 { mag: 76992, sign: true }); + data.append(FP16x16 { mag: 88006, sign: true }); + data.append(FP16x16 { mag: 121995, sign: true }); + data.append(FP16x16 { mag: 30997, sign: true }); + data.append(FP16x16 { mag: 37845, sign: true }); + data.append(FP16x16 { mag: 41773, sign: false }); + data.append(FP16x16 { mag: 25693, sign: false }); + data.append(FP16x16 { mag: 2257, sign: false }); + data.append(FP16x16 { mag: 21906, sign: true }); + data.append(FP16x16 { mag: 5291, sign: false }); + data.append(FP16x16 { mag: 24583, sign: true }); + data.append(FP16x16 { mag: 60049, sign: true }); + data.append(FP16x16 { mag: 9991, sign: false }); + data.append(FP16x16 { mag: 104858, sign: false }); + data.append(FP16x16 { mag: 55871, sign: true }); + data.append(FP16x16 { mag: 54598, sign: true }); + data.append(FP16x16 { mag: 167306, sign: true }); + data.append(FP16x16 { mag: 35180, sign: false }); + data.append(FP16x16 { mag: 196014, sign: true }); + data.append(FP16x16 { mag: 49285, sign: true }); + data.append(FP16x16 { mag: 50669, sign: true }); + data.append(FP16x16 { mag: 11523, sign: true }); + data.append(FP16x16 { mag: 9496, sign: true }); + data.append(FP16x16 { mag: 55801, sign: false }); + data.append(FP16x16 { mag: 104529, sign: false }); + data.append(FP16x16 { mag: 637, sign: true }); + data.append(FP16x16 { mag: 93676, sign: true }); + data.append(FP16x16 { 
mag: 24789, sign: false }); + data.append(FP16x16 { mag: 7861, sign: false }); + data.append(FP16x16 { mag: 51137, sign: false }); + data.append(FP16x16 { mag: 73137, sign: true }); + data.append(FP16x16 { mag: 99812, sign: false }); + data.append(FP16x16 { mag: 62976, sign: true }); + data.append(FP16x16 { mag: 17193, sign: false }); + data.append(FP16x16 { mag: 30532, sign: true }); + data.append(FP16x16 { mag: 8014, sign: false }); + data.append(FP16x16 { mag: 47501, sign: true }); + data.append(FP16x16 { mag: 66682, sign: true }); + data.append(FP16x16 { mag: 95646, sign: false }); + data.append(FP16x16 { mag: 20504, sign: true }); + data.append(FP16x16 { mag: 4688, sign: true }); + data.append(FP16x16 { mag: 26672, sign: false }); + data.append(FP16x16 { mag: 88843, sign: false }); + data.append(FP16x16 { mag: 100847, sign: false }); + data.append(FP16x16 { mag: 128504, sign: true }); + data.append(FP16x16 { mag: 58079, sign: true }); + data.append(FP16x16 { mag: 42461, sign: false }); + data.append(FP16x16 { mag: 20574, sign: true }); + data.append(FP16x16 { mag: 94980, sign: false }); + data.append(FP16x16 { mag: 123767, sign: true }); + data.append(FP16x16 { mag: 81525, sign: true }); + data.append(FP16x16 { mag: 100345, sign: false }); + data.append(FP16x16 { mag: 27527, sign: false }); + data.append(FP16x16 { mag: 162338, sign: false }); + data.append(FP16x16 { mag: 90315, sign: true }); + data.append(FP16x16 { mag: 98283, sign: false }); + data.append(FP16x16 { mag: 51291, sign: false }); + data.append(FP16x16 { mag: 15507, sign: false }); + data.append(FP16x16 { mag: 129858, sign: true }); + data.append(FP16x16 { mag: 70575, sign: false }); + data.append(FP16x16 { mag: 717, sign: true }); + data.append(FP16x16 { mag: 27936, sign: true }); + data.append(FP16x16 { mag: 24785, sign: false }); + data.append(FP16x16 { mag: 47070, sign: false }); + data.append(FP16x16 { mag: 53060, sign: true }); + data.append(FP16x16 { mag: 71736, sign: true }); + 
data.append(FP16x16 { mag: 23701, sign: true }); + data.append(FP16x16 { mag: 73045, sign: true }); + data.append(FP16x16 { mag: 90496, sign: true }); + data.append(FP16x16 { mag: 100675, sign: true }); + data.append(FP16x16 { mag: 7795, sign: true }); + data.append(FP16x16 { mag: 39581, sign: false }); + data.append(FP16x16 { mag: 3716, sign: true }); + data.append(FP16x16 { mag: 76732, sign: true }); + data.append(FP16x16 { mag: 43912, sign: true }); + data.append(FP16x16 { mag: 19320, sign: true }); + data.append(FP16x16 { mag: 22545, sign: true }); + data.append(FP16x16 { mag: 27599, sign: false }); + data.append(FP16x16 { mag: 32793, sign: true }); + data.append(FP16x16 { mag: 47706, sign: true }); + data.append(FP16x16 { mag: 96112, sign: true }); + data.append(FP16x16 { mag: 34764, sign: false }); + data.append(FP16x16 { mag: 77647, sign: true }); + data.append(FP16x16 { mag: 35485, sign: false }); + data.append(FP16x16 { mag: 5584, sign: true }); + data.append(FP16x16 { mag: 11917, sign: false }); + data.append(FP16x16 { mag: 37395, sign: true }); + data.append(FP16x16 { mag: 38246, sign: true }); + data.append(FP16x16 { mag: 34063, sign: true }); + data.append(FP16x16 { mag: 20168, sign: false }); + data.append(FP16x16 { mag: 72849, sign: false }); + data.append(FP16x16 { mag: 40801, sign: false }); + data.append(FP16x16 { mag: 42674, sign: false }); + data.append(FP16x16 { mag: 22630, sign: true }); + data.append(FP16x16 { mag: 76034, sign: false }); + data.append(FP16x16 { mag: 62973, sign: false }); + data.append(FP16x16 { mag: 116410, sign: false }); + data.append(FP16x16 { mag: 1951, sign: true }); + data.append(FP16x16 { mag: 33165, sign: true }); + data.append(FP16x16 { mag: 46154, sign: true }); + data.append(FP16x16 { mag: 50498, sign: true }); + data.append(FP16x16 { mag: 5557, sign: true }); + data.append(FP16x16 { mag: 15958, sign: true }); + data.append(FP16x16 { mag: 55572, sign: true }); + data.append(FP16x16 { mag: 116353, sign: true }); + 
data.append(FP16x16 { mag: 104928, sign: true }); + data.append(FP16x16 { mag: 12275, sign: false }); + data.append(FP16x16 { mag: 2617, sign: true }); + data.append(FP16x16 { mag: 13198, sign: false }); + data.append(FP16x16 { mag: 71218, sign: true }); + data.append(FP16x16 { mag: 8582, sign: false }); + data.append(FP16x16 { mag: 34259, sign: false }); + data.append(FP16x16 { mag: 32055, sign: false }); + data.append(FP16x16 { mag: 18660, sign: false }); + data.append(FP16x16 { mag: 5926, sign: true }); + data.append(FP16x16 { mag: 2802, sign: true }); + data.append(FP16x16 { mag: 71274, sign: false }); + data.append(FP16x16 { mag: 37167, sign: false }); + data.append(FP16x16 { mag: 8185, sign: true }); + data.append(FP16x16 { mag: 53587, sign: true }); + data.append(FP16x16 { mag: 24956, sign: false }); + data.append(FP16x16 { mag: 47492, sign: true }); + data.append(FP16x16 { mag: 30685, sign: true }); + data.append(FP16x16 { mag: 65599, sign: false }); + data.append(FP16x16 { mag: 110444, sign: false }); + data.append(FP16x16 { mag: 11800, sign: false }); + data.append(FP16x16 { mag: 21534, sign: true }); + data.append(FP16x16 { mag: 4907, sign: true }); + data.append(FP16x16 { mag: 67101, sign: false }); + data.append(FP16x16 { mag: 38260, sign: true }); + data.append(FP16x16 { mag: 61924, sign: true }); + data.append(FP16x16 { mag: 7527, sign: false }); + data.append(FP16x16 { mag: 49451, sign: false }); + data.append(FP16x16 { mag: 182905, sign: true }); + data.append(FP16x16 { mag: 16395, sign: true }); + data.append(FP16x16 { mag: 67153, sign: true }); + data.append(FP16x16 { mag: 31050, sign: false }); + data.append(FP16x16 { mag: 5364, sign: true }); + data.append(FP16x16 { mag: 68197, sign: true }); + data.append(FP16x16 { mag: 60008, sign: false }); + data.append(FP16x16 { mag: 77429, sign: true }); + data.append(FP16x16 { mag: 6129, sign: false }); + data.append(FP16x16 { mag: 89537, sign: true }); + data.append(FP16x16 { mag: 46834, sign: false }); 
+ data.append(FP16x16 { mag: 60579, sign: false }); + data.append(FP16x16 { mag: 66521, sign: false }); + data.append(FP16x16 { mag: 64292, sign: false }); + data.append(FP16x16 { mag: 115133, sign: true }); + data.append(FP16x16 { mag: 94534, sign: false }); + data.append(FP16x16 { mag: 39659, sign: false }); + data.append(FP16x16 { mag: 67484, sign: true }); + data.append(FP16x16 { mag: 20442, sign: true }); + data.append(FP16x16 { mag: 54691, sign: false }); + data.append(FP16x16 { mag: 81798, sign: false }); + data.append(FP16x16 { mag: 89422, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_default/output_0.cairo b/tests/nodes/maxpool_2d_default/output_0.cairo new file mode 100644 index 000000000..fc25d182a --- /dev/null +++ b/tests/nodes/maxpool_2d_default/output_0.cairo @@ -0,0 +1,162 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(7); + shape.append(7); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 77568, sign: false }); + data.append(FP16x16 { mag: 77568, sign: false }); + data.append(FP16x16 { mag: 72118, sign: false }); + data.append(FP16x16 { mag: 97321, sign: false }); + data.append(FP16x16 { mag: 97321, sign: false }); + data.append(FP16x16 { mag: 62906, sign: false }); + data.append(FP16x16 { mag: 62906, sign: false }); + data.append(FP16x16 { mag: 77568, sign: false }); + data.append(FP16x16 { mag: 77568, sign: false }); + data.append(FP16x16 { mag: 72118, sign: false }); + data.append(FP16x16 { mag: 97321, sign: false }); + data.append(FP16x16 { mag: 97321, sign: false }); + data.append(FP16x16 { mag: 160417, sign: false }); + data.append(FP16x16 { mag: 160417, sign: false }); + data.append(FP16x16 
{ mag: 49420, sign: false }); + data.append(FP16x16 { mag: 60389, sign: false }); + data.append(FP16x16 { mag: 82115, sign: false }); + data.append(FP16x16 { mag: 82115, sign: false }); + data.append(FP16x16 { mag: 9373, sign: false }); + data.append(FP16x16 { mag: 160417, sign: false }); + data.append(FP16x16 { mag: 160417, sign: false }); + data.append(FP16x16 { mag: 47764, sign: false }); + data.append(FP16x16 { mag: 60389, sign: false }); + data.append(FP16x16 { mag: 82115, sign: false }); + data.append(FP16x16 { mag: 108526, sign: false }); + data.append(FP16x16 { mag: 108526, sign: false }); + data.append(FP16x16 { mag: 9373, sign: false }); + data.append(FP16x16 { mag: 3608, sign: false }); + data.append(FP16x16 { mag: 47764, sign: false }); + data.append(FP16x16 { mag: 47764, sign: false }); + data.append(FP16x16 { mag: 73567, sign: false }); + data.append(FP16x16 { mag: 108526, sign: false }); + data.append(FP16x16 { mag: 108526, sign: false }); + data.append(FP16x16 { mag: 5291, sign: false }); + data.append(FP16x16 { mag: 5291, sign: false }); + data.append(FP16x16 { mag: 9991, sign: false }); + data.append(FP16x16 { mag: 104858, sign: false }); + data.append(FP16x16 { mag: 104858, sign: false }); + data.append(FP16x16 { mag: 25693, sign: false }); + data.append(FP16x16 { mag: 2257, sign: false }); + data.append(FP16x16 { mag: 35180, sign: false }); + data.append(FP16x16 { mag: 35180, sign: false }); + data.append(FP16x16 { mag: 9991, sign: false }); + data.append(FP16x16 { mag: 104858, sign: false }); + data.append(FP16x16 { mag: 104858, sign: false }); + data.append(FP16x16 { mag: 55801, sign: false }); + data.append(FP16x16 { mag: 104529, sign: false }); + data.append(FP16x16 { mag: 104529, sign: false }); + data.append(FP16x16 { mag: 35180, sign: false }); + data.append(FP16x16 { mag: 24789, sign: false }); + data.append(FP16x16 { mag: 51137, sign: false }); + data.append(FP16x16 { mag: 95646, sign: false }); + data.append(FP16x16 { mag: 99812, sign: 
false }); + data.append(FP16x16 { mag: 99812, sign: false }); + data.append(FP16x16 { mag: 26672, sign: false }); + data.append(FP16x16 { mag: 88843, sign: false }); + data.append(FP16x16 { mag: 100847, sign: false }); + data.append(FP16x16 { mag: 47501, sign: true }); + data.append(FP16x16 { mag: 95646, sign: false }); + data.append(FP16x16 { mag: 95646, sign: false }); + data.append(FP16x16 { mag: 94980, sign: false }); + data.append(FP16x16 { mag: 94980, sign: false }); + data.append(FP16x16 { mag: 88843, sign: false }); + data.append(FP16x16 { mag: 100847, sign: false }); + data.append(FP16x16 { mag: 162338, sign: false }); + data.append(FP16x16 { mag: 162338, sign: false }); + data.append(FP16x16 { mag: 98283, sign: false }); + data.append(FP16x16 { mag: 98283, sign: false }); + data.append(FP16x16 { mag: 94980, sign: false }); + data.append(FP16x16 { mag: 15507, sign: false }); + data.append(FP16x16 { mag: 100345, sign: false }); + data.append(FP16x16 { mag: 162338, sign: false }); + data.append(FP16x16 { mag: 162338, sign: false }); + data.append(FP16x16 { mag: 98283, sign: false }); + data.append(FP16x16 { mag: 98283, sign: false }); + data.append(FP16x16 { mag: 51291, sign: false }); + data.append(FP16x16 { mag: 15507, sign: false }); + data.append(FP16x16 { mag: 70575, sign: false }); + data.append(FP16x16 { mag: 717, sign: true }); + data.append(FP16x16 { mag: 24785, sign: false }); + data.append(FP16x16 { mag: 47070, sign: false }); + data.append(FP16x16 { mag: 47070, sign: false }); + data.append(FP16x16 { mag: 3716, sign: true }); + data.append(FP16x16 { mag: 23701, sign: true }); + data.append(FP16x16 { mag: 19320, sign: true }); + data.append(FP16x16 { mag: 27599, sign: false }); + data.append(FP16x16 { mag: 27599, sign: false }); + data.append(FP16x16 { mag: 39581, sign: false }); + data.append(FP16x16 { mag: 39581, sign: false }); + data.append(FP16x16 { mag: 34764, sign: false }); + data.append(FP16x16 { mag: 34764, sign: false }); + 
data.append(FP16x16 { mag: 35485, sign: false }); + data.append(FP16x16 { mag: 27599, sign: false }); + data.append(FP16x16 { mag: 27599, sign: false }); + data.append(FP16x16 { mag: 32793, sign: true }); + data.append(FP16x16 { mag: 34063, sign: true }); + data.append(FP16x16 { mag: 34764, sign: false }); + data.append(FP16x16 { mag: 72849, sign: false }); + data.append(FP16x16 { mag: 42674, sign: false }); + data.append(FP16x16 { mag: 42674, sign: false }); + data.append(FP16x16 { mag: 76034, sign: false }); + data.append(FP16x16 { mag: 76034, sign: false }); + data.append(FP16x16 { mag: 116410, sign: false }); + data.append(FP16x16 { mag: 116410, sign: false }); + data.append(FP16x16 { mag: 12275, sign: false }); + data.append(FP16x16 { mag: 13198, sign: false }); + data.append(FP16x16 { mag: 13198, sign: false }); + data.append(FP16x16 { mag: 8582, sign: false }); + data.append(FP16x16 { mag: 34259, sign: false }); + data.append(FP16x16 { mag: 34259, sign: false }); + data.append(FP16x16 { mag: 32055, sign: false }); + data.append(FP16x16 { mag: 18660, sign: false }); + data.append(FP16x16 { mag: 71274, sign: false }); + data.append(FP16x16 { mag: 71274, sign: false }); + data.append(FP16x16 { mag: 37167, sign: false }); + data.append(FP16x16 { mag: 34259, sign: false }); + data.append(FP16x16 { mag: 34259, sign: false }); + data.append(FP16x16 { mag: 32055, sign: false }); + data.append(FP16x16 { mag: 18660, sign: false }); + data.append(FP16x16 { mag: 110444, sign: false }); + data.append(FP16x16 { mag: 110444, sign: false }); + data.append(FP16x16 { mag: 37167, sign: false }); + data.append(FP16x16 { mag: 4907, sign: true }); + data.append(FP16x16 { mag: 67101, sign: false }); + data.append(FP16x16 { mag: 67101, sign: false }); + data.append(FP16x16 { mag: 30685, sign: true }); + data.append(FP16x16 { mag: 110444, sign: false }); + data.append(FP16x16 { mag: 110444, sign: false }); + data.append(FP16x16 { mag: 11800, sign: false }); + data.append(FP16x16 { 
mag: 4907, sign: true }); + data.append(FP16x16 { mag: 67101, sign: false }); + data.append(FP16x16 { mag: 67101, sign: false }); + data.append(FP16x16 { mag: 5364, sign: true }); + data.append(FP16x16 { mag: 60008, sign: false }); + data.append(FP16x16 { mag: 49451, sign: false }); + data.append(FP16x16 { mag: 6129, sign: false }); + data.append(FP16x16 { mag: 46834, sign: false }); + data.append(FP16x16 { mag: 60579, sign: false }); + data.append(FP16x16 { mag: 66521, sign: false }); + data.append(FP16x16 { mag: 66521, sign: false }); + data.append(FP16x16 { mag: 94534, sign: false }); + data.append(FP16x16 { mag: 94534, sign: false }); + data.append(FP16x16 { mag: 39659, sign: false }); + data.append(FP16x16 { mag: 46834, sign: false }); + data.append(FP16x16 { mag: 60579, sign: false }); + data.append(FP16x16 { mag: 81798, sign: false }); + data.append(FP16x16 { mag: 89422, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_dilations.cairo b/tests/nodes/maxpool_2d_dilations.cairo new file mode 100644 index 000000000..dc8686055 --- /dev/null +++ b/tests/nodes/maxpool_2d_dilations.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_dilations() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + array![2, 2].span(), + Option::None, + Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_dilations/input_0.cairo b/tests/nodes/maxpool_2d_dilations/input_0.cairo new file mode 100644 index 000000000..29b195332 --- /dev/null +++ 
b/tests/nodes/maxpool_2d_dilations/input_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_dilations/output_0.cairo b/tests/nodes/maxpool_2d_dilations/output_0.cairo new file mode 100644 index 000000000..af7270750 --- /dev/null +++ b/tests/nodes/maxpool_2d_dilations/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + 
data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_pads_default.cairo b/tests/nodes/maxpool_2d_pads_default.cairo new file mode 100644 index 000000000..103017375 --- /dev/null +++ b/tests/nodes/maxpool_2d_pads_default.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_pads_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::None, + array![5, 5].span(), + Option::Some(array![2, 2, 2, 2].span()), + Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_pads_default/input_0.cairo b/tests/nodes/maxpool_2d_pads_default/input_0.cairo new file mode 100644 index 000000000..28d2d90c9 --- /dev/null +++ b/tests/nodes/maxpool_2d_pads_default/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, 
sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_pads_default/output_0.cairo b/tests/nodes/maxpool_2d_pads_default/output_0.cairo new file mode 100644 index 000000000..c31cfdd9f --- /dev/null +++ b/tests/nodes/maxpool_2d_pads_default/output_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + 
data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_same_lower_default.cairo b/tests/nodes/maxpool_2d_same_lower_default.cairo new file mode 100644 index 000000000..e947cc879 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_lower_default.cairo @@ -0,0 +1,31 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::nn::AUTO_PAD; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_same_lower_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::Some(AUTO_PAD::SAME_LOWER), + Option::None, + Option::None, + array![2, 2].span(), 
+ Option::None, + Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_same_lower_default/input_0.cairo b/tests/nodes/maxpool_2d_same_lower_default/input_0.cairo new file mode 100644 index 000000000..2d3b05884 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_lower_default/input_0.cairo @@ -0,0 +1,207 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(8); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 138703, sign: true }); + data.append(FP16x16 { mag: 17593, sign: false }); + data.append(FP16x16 { mag: 37316, sign: true }); + data.append(FP16x16 { mag: 18101, sign: true }); + data.append(FP16x16 { mag: 100611, sign: true }); + data.append(FP16x16 { mag: 659, sign: false }); + data.append(FP16x16 { mag: 100276, sign: false }); + data.append(FP16x16 { mag: 38390, sign: true }); + data.append(FP16x16 { mag: 33210, sign: false }); + data.append(FP16x16 { mag: 61895, sign: true }); + data.append(FP16x16 { mag: 7680, sign: true }); + data.append(FP16x16 { mag: 13113, sign: false }); + data.append(FP16x16 { mag: 72129, sign: true }); + data.append(FP16x16 { mag: 79353, sign: false }); + data.append(FP16x16 { mag: 4830, sign: false }); + data.append(FP16x16 { mag: 37310, sign: true }); + data.append(FP16x16 { mag: 59023, sign: false }); + data.append(FP16x16 { mag: 59144, sign: false }); + data.append(FP16x16 { mag: 20810, sign: true }); + data.append(FP16x16 { mag: 191104, sign: false }); + data.append(FP16x16 { mag: 41831, sign: false }); + data.append(FP16x16 { mag: 85357, sign: true }); + data.append(FP16x16 { mag: 68515, sign: false }); + data.append(FP16x16 { mag: 28484, sign: false }); + 
data.append(FP16x16 { mag: 45074, sign: false }); + data.append(FP16x16 { mag: 44778, sign: true }); + data.append(FP16x16 { mag: 36984, sign: false }); + data.append(FP16x16 { mag: 66850, sign: false }); + data.append(FP16x16 { mag: 112661, sign: false }); + data.append(FP16x16 { mag: 23651, sign: true }); + data.append(FP16x16 { mag: 79272, sign: false }); + data.append(FP16x16 { mag: 154926, sign: true }); + data.append(FP16x16 { mag: 10887, sign: true }); + data.append(FP16x16 { mag: 6880, sign: true }); + data.append(FP16x16 { mag: 59713, sign: true }); + data.append(FP16x16 { mag: 36990, sign: true }); + data.append(FP16x16 { mag: 47134, sign: true }); + data.append(FP16x16 { mag: 103368, sign: false }); + data.append(FP16x16 { mag: 94963, sign: true }); + data.append(FP16x16 { mag: 9558, sign: true }); + data.append(FP16x16 { mag: 141332, sign: true }); + data.append(FP16x16 { mag: 32922, sign: true }); + data.append(FP16x16 { mag: 14154, sign: true }); + data.append(FP16x16 { mag: 138698, sign: false }); + data.append(FP16x16 { mag: 26096, sign: false }); + data.append(FP16x16 { mag: 91856, sign: false }); + data.append(FP16x16 { mag: 31118, sign: true }); + data.append(FP16x16 { mag: 508, sign: false }); + data.append(FP16x16 { mag: 35988, sign: false }); + data.append(FP16x16 { mag: 9381, sign: true }); + data.append(FP16x16 { mag: 10816, sign: true }); + data.append(FP16x16 { mag: 28140, sign: false }); + data.append(FP16x16 { mag: 10298, sign: true }); + data.append(FP16x16 { mag: 68132, sign: false }); + data.append(FP16x16 { mag: 80322, sign: false }); + data.append(FP16x16 { mag: 88352, sign: false }); + data.append(FP16x16 { mag: 100098, sign: false }); + data.append(FP16x16 { mag: 53069, sign: false }); + data.append(FP16x16 { mag: 155072, sign: false }); + data.append(FP16x16 { mag: 22128, sign: true }); + data.append(FP16x16 { mag: 172627, sign: true }); + data.append(FP16x16 { mag: 20198, sign: false }); + data.append(FP16x16 { mag: 3764, sign: 
true }); + data.append(FP16x16 { mag: 34532, sign: false }); + data.append(FP16x16 { mag: 81111, sign: true }); + data.append(FP16x16 { mag: 38033, sign: true }); + data.append(FP16x16 { mag: 25795, sign: true }); + data.append(FP16x16 { mag: 53914, sign: true }); + data.append(FP16x16 { mag: 58934, sign: true }); + data.append(FP16x16 { mag: 74080, sign: true }); + data.append(FP16x16 { mag: 53723, sign: true }); + data.append(FP16x16 { mag: 10926, sign: false }); + data.append(FP16x16 { mag: 84619, sign: false }); + data.append(FP16x16 { mag: 188693, sign: false }); + data.append(FP16x16 { mag: 37774, sign: false }); + data.append(FP16x16 { mag: 33855, sign: false }); + data.append(FP16x16 { mag: 86756, sign: false }); + data.append(FP16x16 { mag: 47341, sign: false }); + data.append(FP16x16 { mag: 16804, sign: false }); + data.append(FP16x16 { mag: 19410, sign: true }); + data.append(FP16x16 { mag: 4857, sign: true }); + data.append(FP16x16 { mag: 144907, sign: false }); + data.append(FP16x16 { mag: 19674, sign: false }); + data.append(FP16x16 { mag: 7336, sign: true }); + data.append(FP16x16 { mag: 83249, sign: true }); + data.append(FP16x16 { mag: 13101, sign: true }); + data.append(FP16x16 { mag: 13796, sign: false }); + data.append(FP16x16 { mag: 23641, sign: true }); + data.append(FP16x16 { mag: 61764, sign: true }); + data.append(FP16x16 { mag: 28933, sign: true }); + data.append(FP16x16 { mag: 79450, sign: false }); + data.append(FP16x16 { mag: 43751, sign: false }); + data.append(FP16x16 { mag: 99475, sign: false }); + data.append(FP16x16 { mag: 99879, sign: true }); + data.append(FP16x16 { mag: 16143, sign: false }); + data.append(FP16x16 { mag: 19630, sign: true }); + data.append(FP16x16 { mag: 119844, sign: false }); + data.append(FP16x16 { mag: 4729, sign: false }); + data.append(FP16x16 { mag: 40092, sign: false }); + data.append(FP16x16 { mag: 52497, sign: false }); + data.append(FP16x16 { mag: 28145, sign: true }); + data.append(FP16x16 { mag: 
31321, sign: false }); + data.append(FP16x16 { mag: 47437, sign: false }); + data.append(FP16x16 { mag: 59558, sign: false }); + data.append(FP16x16 { mag: 154091, sign: false }); + data.append(FP16x16 { mag: 60540, sign: false }); + data.append(FP16x16 { mag: 46907, sign: true }); + data.append(FP16x16 { mag: 84671, sign: false }); + data.append(FP16x16 { mag: 56013, sign: false }); + data.append(FP16x16 { mag: 58264, sign: true }); + data.append(FP16x16 { mag: 20243, sign: false }); + data.append(FP16x16 { mag: 147219, sign: true }); + data.append(FP16x16 { mag: 36880, sign: false }); + data.append(FP16x16 { mag: 80459, sign: true }); + data.append(FP16x16 { mag: 52556, sign: false }); + data.append(FP16x16 { mag: 176520, sign: true }); + data.append(FP16x16 { mag: 89561, sign: true }); + data.append(FP16x16 { mag: 45221, sign: false }); + data.append(FP16x16 { mag: 37020, sign: false }); + data.append(FP16x16 { mag: 88532, sign: true }); + data.append(FP16x16 { mag: 99592, sign: false }); + data.append(FP16x16 { mag: 6673, sign: true }); + data.append(FP16x16 { mag: 20497, sign: true }); + data.append(FP16x16 { mag: 48790, sign: false }); + data.append(FP16x16 { mag: 63481, sign: true }); + data.append(FP16x16 { mag: 93939, sign: true }); + data.append(FP16x16 { mag: 10523, sign: true }); + data.append(FP16x16 { mag: 90627, sign: false }); + data.append(FP16x16 { mag: 15429, sign: false }); + data.append(FP16x16 { mag: 9882, sign: false }); + data.append(FP16x16 { mag: 88221, sign: false }); + data.append(FP16x16 { mag: 103220, sign: false }); + data.append(FP16x16 { mag: 18470, sign: false }); + data.append(FP16x16 { mag: 116464, sign: true }); + data.append(FP16x16 { mag: 45172, sign: true }); + data.append(FP16x16 { mag: 28246, sign: false }); + data.append(FP16x16 { mag: 62933, sign: true }); + data.append(FP16x16 { mag: 80332, sign: false }); + data.append(FP16x16 { mag: 21278, sign: true }); + data.append(FP16x16 { mag: 56583, sign: true }); + 
data.append(FP16x16 { mag: 34590, sign: true }); + data.append(FP16x16 { mag: 48885, sign: true }); + data.append(FP16x16 { mag: 38070, sign: true }); + data.append(FP16x16 { mag: 51209, sign: true }); + data.append(FP16x16 { mag: 44554, sign: false }); + data.append(FP16x16 { mag: 75396, sign: true }); + data.append(FP16x16 { mag: 162232, sign: true }); + data.append(FP16x16 { mag: 85388, sign: true }); + data.append(FP16x16 { mag: 77567, sign: true }); + data.append(FP16x16 { mag: 46076, sign: false }); + data.append(FP16x16 { mag: 1258, sign: false }); + data.append(FP16x16 { mag: 75938, sign: true }); + data.append(FP16x16 { mag: 19808, sign: true }); + data.append(FP16x16 { mag: 3602, sign: true }); + data.append(FP16x16 { mag: 26122, sign: true }); + data.append(FP16x16 { mag: 48685, sign: true }); + data.append(FP16x16 { mag: 67709, sign: true }); + data.append(FP16x16 { mag: 15860, sign: false }); + data.append(FP16x16 { mag: 59382, sign: false }); + data.append(FP16x16 { mag: 39707, sign: true }); + data.append(FP16x16 { mag: 3979, sign: true }); + data.append(FP16x16 { mag: 14954, sign: false }); + data.append(FP16x16 { mag: 94433, sign: true }); + data.append(FP16x16 { mag: 24674, sign: false }); + data.append(FP16x16 { mag: 32149, sign: true }); + data.append(FP16x16 { mag: 96812, sign: false }); + data.append(FP16x16 { mag: 32335, sign: true }); + data.append(FP16x16 { mag: 26743, sign: false }); + data.append(FP16x16 { mag: 64580, sign: false }); + data.append(FP16x16 { mag: 17788, sign: true }); + data.append(FP16x16 { mag: 25898, sign: false }); + data.append(FP16x16 { mag: 36605, sign: false }); + data.append(FP16x16 { mag: 78960, sign: true }); + data.append(FP16x16 { mag: 117485, sign: true }); + data.append(FP16x16 { mag: 23270, sign: true }); + data.append(FP16x16 { mag: 1539, sign: false }); + data.append(FP16x16 { mag: 46000, sign: false }); + data.append(FP16x16 { mag: 110506, sign: true }); + data.append(FP16x16 { mag: 37096, sign: false 
}); + data.append(FP16x16 { mag: 24200, sign: true }); + data.append(FP16x16 { mag: 51581, sign: false }); + data.append(FP16x16 { mag: 17036, sign: false }); + data.append(FP16x16 { mag: 21576, sign: true }); + data.append(FP16x16 { mag: 61805, sign: false }); + data.append(FP16x16 { mag: 30579, sign: false }); + data.append(FP16x16 { mag: 23251, sign: true }); + data.append(FP16x16 { mag: 37590, sign: true }); + data.append(FP16x16 { mag: 30907, sign: false }); + data.append(FP16x16 { mag: 10479, sign: false }); + data.append(FP16x16 { mag: 15777, sign: true }); + data.append(FP16x16 { mag: 12917, sign: false }); + data.append(FP16x16 { mag: 109290, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_same_lower_default/output_0.cairo b/tests/nodes/maxpool_2d_same_lower_default/output_0.cairo new file mode 100644 index 000000000..b272a29cb --- /dev/null +++ b/tests/nodes/maxpool_2d_same_lower_default/output_0.cairo @@ -0,0 +1,207 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(8); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 138703, sign: true }); + data.append(FP16x16 { mag: 17593, sign: false }); + data.append(FP16x16 { mag: 17593, sign: false }); + data.append(FP16x16 { mag: 18101, sign: true }); + data.append(FP16x16 { mag: 18101, sign: true }); + data.append(FP16x16 { mag: 659, sign: false }); + data.append(FP16x16 { mag: 100276, sign: false }); + data.append(FP16x16 { mag: 100276, sign: false }); + data.append(FP16x16 { mag: 33210, sign: false }); + data.append(FP16x16 { mag: 33210, sign: false }); + data.append(FP16x16 { mag: 17593, sign: false }); + data.append(FP16x16 { mag: 13113, sign: 
false }); + data.append(FP16x16 { mag: 13113, sign: false }); + data.append(FP16x16 { mag: 79353, sign: false }); + data.append(FP16x16 { mag: 100276, sign: false }); + data.append(FP16x16 { mag: 100276, sign: false }); + data.append(FP16x16 { mag: 59023, sign: false }); + data.append(FP16x16 { mag: 59144, sign: false }); + data.append(FP16x16 { mag: 59144, sign: false }); + data.append(FP16x16 { mag: 191104, sign: false }); + data.append(FP16x16 { mag: 191104, sign: false }); + data.append(FP16x16 { mag: 79353, sign: false }); + data.append(FP16x16 { mag: 79353, sign: false }); + data.append(FP16x16 { mag: 68515, sign: false }); + data.append(FP16x16 { mag: 59023, sign: false }); + data.append(FP16x16 { mag: 59144, sign: false }); + data.append(FP16x16 { mag: 59144, sign: false }); + data.append(FP16x16 { mag: 191104, sign: false }); + data.append(FP16x16 { mag: 191104, sign: false }); + data.append(FP16x16 { mag: 112661, sign: false }); + data.append(FP16x16 { mag: 79272, sign: false }); + data.append(FP16x16 { mag: 79272, sign: false }); + data.append(FP16x16 { mag: 45074, sign: false }); + data.append(FP16x16 { mag: 45074, sign: false }); + data.append(FP16x16 { mag: 36984, sign: false }); + data.append(FP16x16 { mag: 66850, sign: false }); + data.append(FP16x16 { mag: 112661, sign: false }); + data.append(FP16x16 { mag: 112661, sign: false }); + data.append(FP16x16 { mag: 103368, sign: false }); + data.append(FP16x16 { mag: 79272, sign: false }); + data.append(FP16x16 { mag: 10887, sign: true }); + data.append(FP16x16 { mag: 6880, sign: true }); + data.append(FP16x16 { mag: 6880, sign: true }); + data.append(FP16x16 { mag: 138698, sign: false }); + data.append(FP16x16 { mag: 138698, sign: false }); + data.append(FP16x16 { mag: 103368, sign: false }); + data.append(FP16x16 { mag: 103368, sign: false }); + data.append(FP16x16 { mag: 508, sign: false }); + data.append(FP16x16 { mag: 35988, sign: false }); + data.append(FP16x16 { mag: 35988, sign: false }); + 
data.append(FP16x16 { mag: 9381, sign: true }); + data.append(FP16x16 { mag: 138698, sign: false }); + data.append(FP16x16 { mag: 138698, sign: false }); + data.append(FP16x16 { mag: 91856, sign: false }); + data.append(FP16x16 { mag: 91856, sign: false }); + data.append(FP16x16 { mag: 88352, sign: false }); + data.append(FP16x16 { mag: 100098, sign: false }); + data.append(FP16x16 { mag: 100098, sign: false }); + data.append(FP16x16 { mag: 155072, sign: false }); + data.append(FP16x16 { mag: 155072, sign: false }); + data.append(FP16x16 { mag: 28140, sign: false }); + data.append(FP16x16 { mag: 68132, sign: false }); + data.append(FP16x16 { mag: 80322, sign: false }); + data.append(FP16x16 { mag: 88352, sign: false }); + data.append(FP16x16 { mag: 81111, sign: true }); + data.append(FP16x16 { mag: 38033, sign: true }); + data.append(FP16x16 { mag: 25795, sign: true }); + data.append(FP16x16 { mag: 25795, sign: true }); + data.append(FP16x16 { mag: 53914, sign: true }); + data.append(FP16x16 { mag: 58934, sign: true }); + data.append(FP16x16 { mag: 53723, sign: true }); + data.append(FP16x16 { mag: 10926, sign: false }); + data.append(FP16x16 { mag: 84619, sign: false }); + data.append(FP16x16 { mag: 188693, sign: false }); + data.append(FP16x16 { mag: 188693, sign: false }); + data.append(FP16x16 { mag: 37774, sign: false }); + data.append(FP16x16 { mag: 86756, sign: false }); + data.append(FP16x16 { mag: 86756, sign: false }); + data.append(FP16x16 { mag: 47341, sign: false }); + data.append(FP16x16 { mag: 16804, sign: false }); + data.append(FP16x16 { mag: 84619, sign: false }); + data.append(FP16x16 { mag: 188693, sign: false }); + data.append(FP16x16 { mag: 188693, sign: false }); + data.append(FP16x16 { mag: 37774, sign: false }); + data.append(FP16x16 { mag: 86756, sign: false }); + data.append(FP16x16 { mag: 86756, sign: false }); + data.append(FP16x16 { mag: 47341, sign: false }); + data.append(FP16x16 { mag: 16804, sign: false }); + data.append(FP16x16 { 
mag: 4857, sign: true }); + data.append(FP16x16 { mag: 144907, sign: false }); + data.append(FP16x16 { mag: 144907, sign: false }); + data.append(FP16x16 { mag: 79450, sign: false }); + data.append(FP16x16 { mag: 99475, sign: false }); + data.append(FP16x16 { mag: 99475, sign: false }); + data.append(FP16x16 { mag: 16143, sign: false }); + data.append(FP16x16 { mag: 16143, sign: false }); + data.append(FP16x16 { mag: 119844, sign: false }); + data.append(FP16x16 { mag: 119844, sign: false }); + data.append(FP16x16 { mag: 79450, sign: false }); + data.append(FP16x16 { mag: 79450, sign: false }); + data.append(FP16x16 { mag: 99475, sign: false }); + data.append(FP16x16 { mag: 99475, sign: false }); + data.append(FP16x16 { mag: 47437, sign: false }); + data.append(FP16x16 { mag: 59558, sign: false }); + data.append(FP16x16 { mag: 154091, sign: false }); + data.append(FP16x16 { mag: 154091, sign: false }); + data.append(FP16x16 { mag: 60540, sign: false }); + data.append(FP16x16 { mag: 84671, sign: false }); + data.append(FP16x16 { mag: 84671, sign: false }); + data.append(FP16x16 { mag: 56013, sign: false }); + data.append(FP16x16 { mag: 47437, sign: false }); + data.append(FP16x16 { mag: 59558, sign: false }); + data.append(FP16x16 { mag: 154091, sign: false }); + data.append(FP16x16 { mag: 154091, sign: false }); + data.append(FP16x16 { mag: 60540, sign: false }); + data.append(FP16x16 { mag: 84671, sign: false }); + data.append(FP16x16 { mag: 84671, sign: false }); + data.append(FP16x16 { mag: 56013, sign: false }); + data.append(FP16x16 { mag: 45221, sign: false }); + data.append(FP16x16 { mag: 37020, sign: false }); + data.append(FP16x16 { mag: 99592, sign: false }); + data.append(FP16x16 { mag: 99592, sign: false }); + data.append(FP16x16 { mag: 52556, sign: false }); + data.append(FP16x16 { mag: 52556, sign: false }); + data.append(FP16x16 { mag: 48790, sign: false }); + data.append(FP16x16 { mag: 45221, sign: false }); + data.append(FP16x16 { mag: 45221, sign: 
false }); + data.append(FP16x16 { mag: 90627, sign: false }); + data.append(FP16x16 { mag: 15429, sign: false }); + data.append(FP16x16 { mag: 15429, sign: false }); + data.append(FP16x16 { mag: 88221, sign: false }); + data.append(FP16x16 { mag: 103220, sign: false }); + data.append(FP16x16 { mag: 103220, sign: false }); + data.append(FP16x16 { mag: 18470, sign: false }); + data.append(FP16x16 { mag: 45172, sign: true }); + data.append(FP16x16 { mag: 28246, sign: false }); + data.append(FP16x16 { mag: 15429, sign: false }); + data.append(FP16x16 { mag: 80332, sign: false }); + data.append(FP16x16 { mag: 88221, sign: false }); + data.append(FP16x16 { mag: 103220, sign: false }); + data.append(FP16x16 { mag: 103220, sign: false }); + data.append(FP16x16 { mag: 18470, sign: false }); + data.append(FP16x16 { mag: 38070, sign: true }); + data.append(FP16x16 { mag: 28246, sign: false }); + data.append(FP16x16 { mag: 44554, sign: false }); + data.append(FP16x16 { mag: 80332, sign: false }); + data.append(FP16x16 { mag: 80332, sign: false }); + data.append(FP16x16 { mag: 21278, sign: true }); + data.append(FP16x16 { mag: 34590, sign: true }); + data.append(FP16x16 { mag: 46076, sign: false }); + data.append(FP16x16 { mag: 46076, sign: false }); + data.append(FP16x16 { mag: 1258, sign: false }); + data.append(FP16x16 { mag: 44554, sign: false }); + data.append(FP16x16 { mag: 44554, sign: false }); + data.append(FP16x16 { mag: 3602, sign: true }); + data.append(FP16x16 { mag: 26122, sign: true }); + data.append(FP16x16 { mag: 48685, sign: true }); + data.append(FP16x16 { mag: 46076, sign: false }); + data.append(FP16x16 { mag: 59382, sign: false }); + data.append(FP16x16 { mag: 59382, sign: false }); + data.append(FP16x16 { mag: 3979, sign: true }); + data.append(FP16x16 { mag: 14954, sign: false }); + data.append(FP16x16 { mag: 14954, sign: false }); + data.append(FP16x16 { mag: 24674, sign: false }); + data.append(FP16x16 { mag: 24674, sign: false }); + 
data.append(FP16x16 { mag: 96812, sign: false }); + data.append(FP16x16 { mag: 96812, sign: false }); + data.append(FP16x16 { mag: 59382, sign: false }); + data.append(FP16x16 { mag: 64580, sign: false }); + data.append(FP16x16 { mag: 64580, sign: false }); + data.append(FP16x16 { mag: 25898, sign: false }); + data.append(FP16x16 { mag: 36605, sign: false }); + data.append(FP16x16 { mag: 36605, sign: false }); + data.append(FP16x16 { mag: 96812, sign: false }); + data.append(FP16x16 { mag: 96812, sign: false }); + data.append(FP16x16 { mag: 26743, sign: false }); + data.append(FP16x16 { mag: 64580, sign: false }); + data.append(FP16x16 { mag: 64580, sign: false }); + data.append(FP16x16 { mag: 37096, sign: false }); + data.append(FP16x16 { mag: 37096, sign: false }); + data.append(FP16x16 { mag: 51581, sign: false }); + data.append(FP16x16 { mag: 51581, sign: false }); + data.append(FP16x16 { mag: 17036, sign: false }); + data.append(FP16x16 { mag: 61805, sign: false }); + data.append(FP16x16 { mag: 46000, sign: false }); + data.append(FP16x16 { mag: 46000, sign: false }); + data.append(FP16x16 { mag: 37096, sign: false }); + data.append(FP16x16 { mag: 37096, sign: false }); + data.append(FP16x16 { mag: 51581, sign: false }); + data.append(FP16x16 { mag: 51581, sign: false }); + data.append(FP16x16 { mag: 17036, sign: false }); + data.append(FP16x16 { mag: 61805, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_same_upper.cairo b/tests/nodes/maxpool_2d_same_upper.cairo new file mode 100644 index 000000000..45b47735b --- /dev/null +++ b/tests/nodes/maxpool_2d_same_upper.cairo @@ -0,0 +1,31 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; +use orion::operators::nn::AUTO_PAD; + +#[test] +#[available_gas(2000000000)] +fn 
test_maxpool_2d_same_upper() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::Some(AUTO_PAD::SAME_UPPER), + Option::None, + Option::None, + array![3, 3].span(), + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_same_upper/input_0.cairo b/tests/nodes/maxpool_2d_same_upper/input_0.cairo new file mode 100644 index 000000000..28d2d90c9 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_upper/input_0.cairo @@ -0,0 +1,40 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(5); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + 
data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_same_upper/output_0.cairo b/tests/nodes/maxpool_2d_same_upper/output_0.cairo new file mode 100644 index 000000000..43239cab2 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_upper/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_same_upper_default.cairo b/tests/nodes/maxpool_2d_same_upper_default.cairo new file mode 100644 index 000000000..9bb855712 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_upper_default.cairo @@ -0,0 +1,31 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use 
orion::operators::nn::FP16x16NN; +use orion::operators::nn::AUTO_PAD; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_same_upper_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::Some(AUTO_PAD::SAME_UPPER), + Option::None, + Option::None, + array![2, 2].span(), + Option::None, + Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_2d_same_upper_default/input_0.cairo b/tests/nodes/maxpool_2d_same_upper_default/input_0.cairo new file mode 100644 index 000000000..fdaea89f1 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_upper_default/input_0.cairo @@ -0,0 +1,207 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(8); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 52692, sign: true }); + data.append(FP16x16 { mag: 28547, sign: false }); + data.append(FP16x16 { mag: 61903, sign: false }); + data.append(FP16x16 { mag: 117176, sign: true }); + data.append(FP16x16 { mag: 55156, sign: true }); + data.append(FP16x16 { mag: 120666, sign: true }); + data.append(FP16x16 { mag: 11052, sign: true }); + data.append(FP16x16 { mag: 8291, sign: false }); + data.append(FP16x16 { mag: 30701, sign: true }); + data.append(FP16x16 { mag: 17967, sign: false }); + data.append(FP16x16 { mag: 88529, sign: false }); + data.append(FP16x16 { mag: 47477, sign: true }); + data.append(FP16x16 { mag: 2290, sign: false }); + data.append(FP16x16 { mag: 92642, sign: false }); + data.append(FP16x16 { mag: 25259, sign: false }); + data.append(FP16x16 { mag: 4575, sign: true }); + data.append(FP16x16 { mag: 2693, sign: false }); + 
data.append(FP16x16 { mag: 105488, sign: true }); + data.append(FP16x16 { mag: 60124, sign: true }); + data.append(FP16x16 { mag: 46704, sign: true }); + data.append(FP16x16 { mag: 59101, sign: false }); + data.append(FP16x16 { mag: 49138, sign: true }); + data.append(FP16x16 { mag: 19003, sign: false }); + data.append(FP16x16 { mag: 17818, sign: false }); + data.append(FP16x16 { mag: 67683, sign: true }); + data.append(FP16x16 { mag: 77933, sign: true }); + data.append(FP16x16 { mag: 12737, sign: true }); + data.append(FP16x16 { mag: 65213, sign: false }); + data.append(FP16x16 { mag: 97849, sign: true }); + data.append(FP16x16 { mag: 49974, sign: false }); + data.append(FP16x16 { mag: 102583, sign: true }); + data.append(FP16x16 { mag: 117996, sign: false }); + data.append(FP16x16 { mag: 43197, sign: false }); + data.append(FP16x16 { mag: 87131, sign: true }); + data.append(FP16x16 { mag: 165019, sign: false }); + data.append(FP16x16 { mag: 35679, sign: true }); + data.append(FP16x16 { mag: 234, sign: false }); + data.append(FP16x16 { mag: 26030, sign: false }); + data.append(FP16x16 { mag: 4122, sign: false }); + data.append(FP16x16 { mag: 47426, sign: false }); + data.append(FP16x16 { mag: 22922, sign: true }); + data.append(FP16x16 { mag: 117833, sign: true }); + data.append(FP16x16 { mag: 100009, sign: false }); + data.append(FP16x16 { mag: 4360, sign: true }); + data.append(FP16x16 { mag: 38570, sign: false }); + data.append(FP16x16 { mag: 163610, sign: false }); + data.append(FP16x16 { mag: 27943, sign: false }); + data.append(FP16x16 { mag: 46610, sign: false }); + data.append(FP16x16 { mag: 27879, sign: false }); + data.append(FP16x16 { mag: 90383, sign: true }); + data.append(FP16x16 { mag: 14715, sign: true }); + data.append(FP16x16 { mag: 96467, sign: true }); + data.append(FP16x16 { mag: 9921, sign: false }); + data.append(FP16x16 { mag: 77435, sign: false }); + data.append(FP16x16 { mag: 54601, sign: true }); + data.append(FP16x16 { mag: 2255, sign: 
true }); + data.append(FP16x16 { mag: 1811, sign: true }); + data.append(FP16x16 { mag: 27154, sign: false }); + data.append(FP16x16 { mag: 28554, sign: false }); + data.append(FP16x16 { mag: 68574, sign: false }); + data.append(FP16x16 { mag: 10013, sign: false }); + data.append(FP16x16 { mag: 54722, sign: true }); + data.append(FP16x16 { mag: 51289, sign: true }); + data.append(FP16x16 { mag: 104200, sign: true }); + data.append(FP16x16 { mag: 72679, sign: true }); + data.append(FP16x16 { mag: 49102, sign: false }); + data.append(FP16x16 { mag: 73473, sign: false }); + data.append(FP16x16 { mag: 12392, sign: true }); + data.append(FP16x16 { mag: 13918, sign: false }); + data.append(FP16x16 { mag: 25166, sign: false }); + data.append(FP16x16 { mag: 54632, sign: false }); + data.append(FP16x16 { mag: 71299, sign: true }); + data.append(FP16x16 { mag: 79231, sign: true }); + data.append(FP16x16 { mag: 20472, sign: false }); + data.append(FP16x16 { mag: 19723, sign: false }); + data.append(FP16x16 { mag: 44396, sign: true }); + data.append(FP16x16 { mag: 45735, sign: false }); + data.append(FP16x16 { mag: 73626, sign: false }); + data.append(FP16x16 { mag: 1061, sign: true }); + data.append(FP16x16 { mag: 17482, sign: true }); + data.append(FP16x16 { mag: 20656, sign: true }); + data.append(FP16x16 { mag: 69032, sign: true }); + data.append(FP16x16 { mag: 7840, sign: false }); + data.append(FP16x16 { mag: 1006, sign: false }); + data.append(FP16x16 { mag: 65113, sign: true }); + data.append(FP16x16 { mag: 56413, sign: false }); + data.append(FP16x16 { mag: 28968, sign: true }); + data.append(FP16x16 { mag: 52619, sign: false }); + data.append(FP16x16 { mag: 4590, sign: false }); + data.append(FP16x16 { mag: 15977, sign: true }); + data.append(FP16x16 { mag: 40501, sign: true }); + data.append(FP16x16 { mag: 2693, sign: false }); + data.append(FP16x16 { mag: 55620, sign: false }); + data.append(FP16x16 { mag: 6900, sign: true }); + data.append(FP16x16 { mag: 13408, 
sign: false }); + data.append(FP16x16 { mag: 55598, sign: true }); + data.append(FP16x16 { mag: 13670, sign: false }); + data.append(FP16x16 { mag: 4231, sign: true }); + data.append(FP16x16 { mag: 47002, sign: true }); + data.append(FP16x16 { mag: 60663, sign: false }); + data.append(FP16x16 { mag: 26283, sign: false }); + data.append(FP16x16 { mag: 156112, sign: true }); + data.append(FP16x16 { mag: 9884, sign: true }); + data.append(FP16x16 { mag: 6926, sign: true }); + data.append(FP16x16 { mag: 8429, sign: true }); + data.append(FP16x16 { mag: 3327, sign: true }); + data.append(FP16x16 { mag: 45839, sign: true }); + data.append(FP16x16 { mag: 57187, sign: false }); + data.append(FP16x16 { mag: 110913, sign: true }); + data.append(FP16x16 { mag: 62795, sign: true }); + data.append(FP16x16 { mag: 109207, sign: false }); + data.append(FP16x16 { mag: 13215, sign: false }); + data.append(FP16x16 { mag: 38528, sign: true }); + data.append(FP16x16 { mag: 59562, sign: false }); + data.append(FP16x16 { mag: 26280, sign: true }); + data.append(FP16x16 { mag: 162194, sign: false }); + data.append(FP16x16 { mag: 61452, sign: false }); + data.append(FP16x16 { mag: 120157, sign: true }); + data.append(FP16x16 { mag: 50927, sign: false }); + data.append(FP16x16 { mag: 47813, sign: false }); + data.append(FP16x16 { mag: 62074, sign: false }); + data.append(FP16x16 { mag: 163638, sign: false }); + data.append(FP16x16 { mag: 21818, sign: true }); + data.append(FP16x16 { mag: 90475, sign: false }); + data.append(FP16x16 { mag: 32112, sign: true }); + data.append(FP16x16 { mag: 23172, sign: false }); + data.append(FP16x16 { mag: 71023, sign: true }); + data.append(FP16x16 { mag: 16348, sign: false }); + data.append(FP16x16 { mag: 28131, sign: false }); + data.append(FP16x16 { mag: 68181, sign: false }); + data.append(FP16x16 { mag: 192465, sign: false }); + data.append(FP16x16 { mag: 22889, sign: false }); + data.append(FP16x16 { mag: 16486, sign: false }); + data.append(FP16x16 
{ mag: 105533, sign: false }); + data.append(FP16x16 { mag: 6657, sign: true }); + data.append(FP16x16 { mag: 37792, sign: true }); + data.append(FP16x16 { mag: 48157, sign: true }); + data.append(FP16x16 { mag: 66040, sign: false }); + data.append(FP16x16 { mag: 12294, sign: true }); + data.append(FP16x16 { mag: 60649, sign: true }); + data.append(FP16x16 { mag: 42113, sign: true }); + data.append(FP16x16 { mag: 88843, sign: false }); + data.append(FP16x16 { mag: 3531, sign: false }); + data.append(FP16x16 { mag: 2708, sign: false }); + data.append(FP16x16 { mag: 93290, sign: true }); + data.append(FP16x16 { mag: 128, sign: false }); + data.append(FP16x16 { mag: 119266, sign: true }); + data.append(FP16x16 { mag: 37523, sign: false }); + data.append(FP16x16 { mag: 74254, sign: true }); + data.append(FP16x16 { mag: 155781, sign: true }); + data.append(FP16x16 { mag: 39485, sign: false }); + data.append(FP16x16 { mag: 16925, sign: false }); + data.append(FP16x16 { mag: 143362, sign: false }); + data.append(FP16x16 { mag: 77348, sign: false }); + data.append(FP16x16 { mag: 61642, sign: true }); + data.append(FP16x16 { mag: 134754, sign: false }); + data.append(FP16x16 { mag: 65160, sign: false }); + data.append(FP16x16 { mag: 48174, sign: true }); + data.append(FP16x16 { mag: 23831, sign: false }); + data.append(FP16x16 { mag: 70016, sign: true }); + data.append(FP16x16 { mag: 38627, sign: true }); + data.append(FP16x16 { mag: 71569, sign: true }); + data.append(FP16x16 { mag: 53240, sign: false }); + data.append(FP16x16 { mag: 123500, sign: false }); + data.append(FP16x16 { mag: 38397, sign: false }); + data.append(FP16x16 { mag: 105854, sign: false }); + data.append(FP16x16 { mag: 151153, sign: false }); + data.append(FP16x16 { mag: 30204, sign: false }); + data.append(FP16x16 { mag: 85564, sign: true }); + data.append(FP16x16 { mag: 13085, sign: false }); + data.append(FP16x16 { mag: 55281, sign: true }); + data.append(FP16x16 { mag: 11452, sign: false }); + 
data.append(FP16x16 { mag: 94698, sign: false }); + data.append(FP16x16 { mag: 113647, sign: false }); + data.append(FP16x16 { mag: 72099, sign: false }); + data.append(FP16x16 { mag: 9025, sign: false }); + data.append(FP16x16 { mag: 11985, sign: true }); + data.append(FP16x16 { mag: 159746, sign: false }); + data.append(FP16x16 { mag: 19273, sign: true }); + data.append(FP16x16 { mag: 23362, sign: false }); + data.append(FP16x16 { mag: 11488, sign: true }); + data.append(FP16x16 { mag: 86897, sign: true }); + data.append(FP16x16 { mag: 17484, sign: false }); + data.append(FP16x16 { mag: 35937, sign: false }); + data.append(FP16x16 { mag: 16572, sign: true }); + data.append(FP16x16 { mag: 47800, sign: true }); + data.append(FP16x16 { mag: 134172, sign: true }); + data.append(FP16x16 { mag: 14711, sign: true }); + data.append(FP16x16 { mag: 70361, sign: true }); + data.append(FP16x16 { mag: 33675, sign: true }); + data.append(FP16x16 { mag: 8412, sign: true }); + data.append(FP16x16 { mag: 59966, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_2d_same_upper_default/output_0.cairo b/tests/nodes/maxpool_2d_same_upper_default/output_0.cairo new file mode 100644 index 000000000..74103d3a1 --- /dev/null +++ b/tests/nodes/maxpool_2d_same_upper_default/output_0.cairo @@ -0,0 +1,207 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(8); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 28547, sign: false }); + data.append(FP16x16 { mag: 88529, sign: false }); + data.append(FP16x16 { mag: 88529, sign: false }); + data.append(FP16x16 { mag: 2290, sign: false }); + data.append(FP16x16 { mag: 92642, sign: false }); + 
data.append(FP16x16 { mag: 92642, sign: false }); + data.append(FP16x16 { mag: 25259, sign: false }); + data.append(FP16x16 { mag: 8291, sign: false }); + data.append(FP16x16 { mag: 17967, sign: false }); + data.append(FP16x16 { mag: 88529, sign: false }); + data.append(FP16x16 { mag: 88529, sign: false }); + data.append(FP16x16 { mag: 59101, sign: false }); + data.append(FP16x16 { mag: 92642, sign: false }); + data.append(FP16x16 { mag: 92642, sign: false }); + data.append(FP16x16 { mag: 25259, sign: false }); + data.append(FP16x16 { mag: 17818, sign: false }); + data.append(FP16x16 { mag: 2693, sign: false }); + data.append(FP16x16 { mag: 12737, sign: true }); + data.append(FP16x16 { mag: 65213, sign: false }); + data.append(FP16x16 { mag: 65213, sign: false }); + data.append(FP16x16 { mag: 59101, sign: false }); + data.append(FP16x16 { mag: 49974, sign: false }); + data.append(FP16x16 { mag: 117996, sign: false }); + data.append(FP16x16 { mag: 117996, sign: false }); + data.append(FP16x16 { mag: 43197, sign: false }); + data.append(FP16x16 { mag: 165019, sign: false }); + data.append(FP16x16 { mag: 165019, sign: false }); + data.append(FP16x16 { mag: 65213, sign: false }); + data.append(FP16x16 { mag: 49974, sign: false }); + data.append(FP16x16 { mag: 49974, sign: false }); + data.append(FP16x16 { mag: 117996, sign: false }); + data.append(FP16x16 { mag: 117996, sign: false }); + data.append(FP16x16 { mag: 43197, sign: false }); + data.append(FP16x16 { mag: 165019, sign: false }); + data.append(FP16x16 { mag: 165019, sign: false }); + data.append(FP16x16 { mag: 38570, sign: false }); + data.append(FP16x16 { mag: 163610, sign: false }); + data.append(FP16x16 { mag: 163610, sign: false }); + data.append(FP16x16 { mag: 47426, sign: false }); + data.append(FP16x16 { mag: 47426, sign: false }); + data.append(FP16x16 { mag: 27879, sign: false }); + data.append(FP16x16 { mag: 100009, sign: false }); + data.append(FP16x16 { mag: 100009, sign: false }); + 
data.append(FP16x16 { mag: 38570, sign: false }); + data.append(FP16x16 { mag: 163610, sign: false }); + data.append(FP16x16 { mag: 163610, sign: false }); + data.append(FP16x16 { mag: 46610, sign: false }); + data.append(FP16x16 { mag: 46610, sign: false }); + data.append(FP16x16 { mag: 27879, sign: false }); + data.append(FP16x16 { mag: 28554, sign: false }); + data.append(FP16x16 { mag: 68574, sign: false }); + data.append(FP16x16 { mag: 68574, sign: false }); + data.append(FP16x16 { mag: 77435, sign: false }); + data.append(FP16x16 { mag: 77435, sign: false }); + data.append(FP16x16 { mag: 2255, sign: true }); + data.append(FP16x16 { mag: 2255, sign: true }); + data.append(FP16x16 { mag: 27154, sign: false }); + data.append(FP16x16 { mag: 28554, sign: false }); + data.append(FP16x16 { mag: 68574, sign: false }); + data.append(FP16x16 { mag: 68574, sign: false }); + data.append(FP16x16 { mag: 10013, sign: false }); + data.append(FP16x16 { mag: 51289, sign: true }); + data.append(FP16x16 { mag: 51289, sign: true }); + data.append(FP16x16 { mag: 104200, sign: true }); + data.append(FP16x16 { mag: 49102, sign: false }); + data.append(FP16x16 { mag: 73473, sign: false }); + data.append(FP16x16 { mag: 73473, sign: false }); + data.append(FP16x16 { mag: 45735, sign: false }); + data.append(FP16x16 { mag: 73626, sign: false }); + data.append(FP16x16 { mag: 73626, sign: false }); + data.append(FP16x16 { mag: 54632, sign: false }); + data.append(FP16x16 { mag: 17482, sign: true }); + data.append(FP16x16 { mag: 20472, sign: false }); + data.append(FP16x16 { mag: 20472, sign: false }); + data.append(FP16x16 { mag: 19723, sign: false }); + data.append(FP16x16 { mag: 45735, sign: false }); + data.append(FP16x16 { mag: 73626, sign: false }); + data.append(FP16x16 { mag: 73626, sign: false }); + data.append(FP16x16 { mag: 52619, sign: false }); + data.append(FP16x16 { mag: 52619, sign: false }); + data.append(FP16x16 { mag: 4590, sign: false }); + data.append(FP16x16 { mag: 
7840, sign: false }); + data.append(FP16x16 { mag: 7840, sign: false }); + data.append(FP16x16 { mag: 55620, sign: false }); + data.append(FP16x16 { mag: 56413, sign: false }); + data.append(FP16x16 { mag: 56413, sign: false }); + data.append(FP16x16 { mag: 52619, sign: false }); + data.append(FP16x16 { mag: 52619, sign: false }); + data.append(FP16x16 { mag: 13670, sign: false }); + data.append(FP16x16 { mag: 4231, sign: true }); + data.append(FP16x16 { mag: 60663, sign: false }); + data.append(FP16x16 { mag: 60663, sign: false }); + data.append(FP16x16 { mag: 55620, sign: false }); + data.append(FP16x16 { mag: 13408, sign: false }); + data.append(FP16x16 { mag: 13408, sign: false }); + data.append(FP16x16 { mag: 6926, sign: true }); + data.append(FP16x16 { mag: 13670, sign: false }); + data.append(FP16x16 { mag: 3327, sign: true }); + data.append(FP16x16 { mag: 60663, sign: false }); + data.append(FP16x16 { mag: 60663, sign: false }); + data.append(FP16x16 { mag: 26283, sign: false }); + data.append(FP16x16 { mag: 109207, sign: false }); + data.append(FP16x16 { mag: 109207, sign: false }); + data.append(FP16x16 { mag: 13215, sign: false }); + data.append(FP16x16 { mag: 59562, sign: false }); + data.append(FP16x16 { mag: 59562, sign: false }); + data.append(FP16x16 { mag: 162194, sign: false }); + data.append(FP16x16 { mag: 162194, sign: false }); + data.append(FP16x16 { mag: 61452, sign: false }); + data.append(FP16x16 { mag: 109207, sign: false }); + data.append(FP16x16 { mag: 109207, sign: false }); + data.append(FP16x16 { mag: 47813, sign: false }); + data.append(FP16x16 { mag: 163638, sign: false }); + data.append(FP16x16 { mag: 163638, sign: false }); + data.append(FP16x16 { mag: 162194, sign: false }); + data.append(FP16x16 { mag: 162194, sign: false }); + data.append(FP16x16 { mag: 61452, sign: false }); + data.append(FP16x16 { mag: 50927, sign: false }); + data.append(FP16x16 { mag: 50927, sign: false }); + data.append(FP16x16 { mag: 47813, sign: false 
}); + data.append(FP16x16 { mag: 163638, sign: false }); + data.append(FP16x16 { mag: 163638, sign: false }); + data.append(FP16x16 { mag: 90475, sign: false }); + data.append(FP16x16 { mag: 90475, sign: false }); + data.append(FP16x16 { mag: 23172, sign: false }); + data.append(FP16x16 { mag: 23172, sign: false }); + data.append(FP16x16 { mag: 16348, sign: false }); + data.append(FP16x16 { mag: 16348, sign: false }); + data.append(FP16x16 { mag: 68181, sign: false }); + data.append(FP16x16 { mag: 192465, sign: false }); + data.append(FP16x16 { mag: 192465, sign: false }); + data.append(FP16x16 { mag: 22889, sign: false }); + data.append(FP16x16 { mag: 105533, sign: false }); + data.append(FP16x16 { mag: 105533, sign: false }); + data.append(FP16x16 { mag: 3531, sign: false }); + data.append(FP16x16 { mag: 2708, sign: false }); + data.append(FP16x16 { mag: 66040, sign: false }); + data.append(FP16x16 { mag: 66040, sign: false }); + data.append(FP16x16 { mag: 37523, sign: false }); + data.append(FP16x16 { mag: 37523, sign: false }); + data.append(FP16x16 { mag: 88843, sign: false }); + data.append(FP16x16 { mag: 88843, sign: false }); + data.append(FP16x16 { mag: 39485, sign: false }); + data.append(FP16x16 { mag: 16925, sign: false }); + data.append(FP16x16 { mag: 143362, sign: false }); + data.append(FP16x16 { mag: 77348, sign: false }); + data.append(FP16x16 { mag: 134754, sign: false }); + data.append(FP16x16 { mag: 134754, sign: false }); + data.append(FP16x16 { mag: 65160, sign: false }); + data.append(FP16x16 { mag: 39485, sign: false }); + data.append(FP16x16 { mag: 39485, sign: false }); + data.append(FP16x16 { mag: 16925, sign: false }); + data.append(FP16x16 { mag: 143362, sign: false }); + data.append(FP16x16 { mag: 77348, sign: false }); + data.append(FP16x16 { mag: 134754, sign: false }); + data.append(FP16x16 { mag: 134754, sign: false }); + data.append(FP16x16 { mag: 105854, sign: false }); + data.append(FP16x16 { mag: 151153, sign: false }); + 
data.append(FP16x16 { mag: 151153, sign: false }); + data.append(FP16x16 { mag: 30204, sign: false }); + data.append(FP16x16 { mag: 13085, sign: false }); + data.append(FP16x16 { mag: 53240, sign: false }); + data.append(FP16x16 { mag: 123500, sign: false }); + data.append(FP16x16 { mag: 123500, sign: false }); + data.append(FP16x16 { mag: 113647, sign: false }); + data.append(FP16x16 { mag: 151153, sign: false }); + data.append(FP16x16 { mag: 151153, sign: false }); + data.append(FP16x16 { mag: 30204, sign: false }); + data.append(FP16x16 { mag: 159746, sign: false }); + data.append(FP16x16 { mag: 159746, sign: false }); + data.append(FP16x16 { mag: 23362, sign: false }); + data.append(FP16x16 { mag: 94698, sign: false }); + data.append(FP16x16 { mag: 113647, sign: false }); + data.append(FP16x16 { mag: 113647, sign: false }); + data.append(FP16x16 { mag: 72099, sign: false }); + data.append(FP16x16 { mag: 35937, sign: false }); + data.append(FP16x16 { mag: 159746, sign: false }); + data.append(FP16x16 { mag: 159746, sign: false }); + data.append(FP16x16 { mag: 23362, sign: false }); + data.append(FP16x16 { mag: 23362, sign: false }); + data.append(FP16x16 { mag: 11488, sign: true }); + data.append(FP16x16 { mag: 17484, sign: false }); + data.append(FP16x16 { mag: 35937, sign: false }); + data.append(FP16x16 { mag: 35937, sign: false }); + data.append(FP16x16 { mag: 16572, sign: true }); + data.append(FP16x16 { mag: 47800, sign: true }); + data.append(FP16x16 { mag: 14711, sign: true }); + data.append(FP16x16 { mag: 14711, sign: true }); + data.append(FP16x16 { mag: 33675, sign: true }); + data.append(FP16x16 { mag: 8412, sign: true }); + data.append(FP16x16 { mag: 8412, sign: true }); + data.append(FP16x16 { mag: 59966, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_3d_dilations.cairo b/tests/nodes/maxpool_3d_dilations.cairo new file mode 100644 index 000000000..f7aafbfc7 --- /dev/null +++ 
b/tests/nodes/maxpool_3d_dilations.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_3d_dilations() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::Some(array![2, 2, 2].span()), + array![2, 2, 2].span(), + Option::None, + Option::None, + Option::Some(array![1, 1, 1].span()), + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_3d_dilations/input_0.cairo b/tests/nodes/maxpool_3d_dilations/input_0.cairo new file mode 100644 index 000000000..a094e9312 --- /dev/null +++ b/tests/nodes/maxpool_3d_dilations/input_0.cairo @@ -0,0 +1,80 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + 
data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, 
sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_3d_dilations/output_0.cairo b/tests/nodes/maxpool_3d_dilations/output_0.cairo new file mode 100644 index 000000000..c755b5052 --- /dev/null +++ b/tests/nodes/maxpool_3d_dilations/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_4d_dilations.cairo 
b/tests/nodes/maxpool_4d_dilations.cairo new file mode 100644 index 000000000..0ad3072ab --- /dev/null +++ b/tests/nodes/maxpool_4d_dilations.cairo @@ -0,0 +1,30 @@ +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_4d_dilations() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (y_0, _) = NNTrait::max_pool( + @input_0, + Option::None, + Option::None, + Option::Some(array![2, 2, 2, 2].span()), + array![2, 2, 2, 2].span(), + Option::None, + Option::None, + Option::None, + 1 + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/maxpool_4d_dilations/input_0.cairo b/tests/nodes/maxpool_4d_dilations/input_0.cairo new file mode 100644 index 000000000..3716a1363 --- /dev/null +++ b/tests/nodes/maxpool_4d_dilations/input_0.cairo @@ -0,0 +1,785 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(4); + shape.append(4); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 57326, sign: true }); + data.append(FP16x16 { mag: 21730, sign: true }); + data.append(FP16x16 { mag: 42582, sign: false }); + data.append(FP16x16 { mag: 39862, sign: false }); + data.append(FP16x16 { mag: 33772, sign: false }); + data.append(FP16x16 { mag: 85813, sign: false }); + data.append(FP16x16 { mag: 53862, sign: false }); + data.append(FP16x16 { mag: 172442, sign: false }); + data.append(FP16x16 { mag: 49149, sign: false }); + data.append(FP16x16 { mag: 22905, sign: true }); + data.append(FP16x16 
{ mag: 33282, sign: true }); + data.append(FP16x16 { mag: 16327, sign: true }); + data.append(FP16x16 { mag: 107261, sign: true }); + data.append(FP16x16 { mag: 46347, sign: true }); + data.append(FP16x16 { mag: 6995, sign: true }); + data.append(FP16x16 { mag: 24181, sign: false }); + data.append(FP16x16 { mag: 104507, sign: false }); + data.append(FP16x16 { mag: 30885, sign: false }); + data.append(FP16x16 { mag: 171591, sign: false }); + data.append(FP16x16 { mag: 11569, sign: true }); + data.append(FP16x16 { mag: 8237, sign: false }); + data.append(FP16x16 { mag: 36041, sign: false }); + data.append(FP16x16 { mag: 70645, sign: true }); + data.append(FP16x16 { mag: 193719, sign: false }); + data.append(FP16x16 { mag: 69351, sign: true }); + data.append(FP16x16 { mag: 28229, sign: false }); + data.append(FP16x16 { mag: 52027, sign: true }); + data.append(FP16x16 { mag: 13767, sign: true }); + data.append(FP16x16 { mag: 11650, sign: false }); + data.append(FP16x16 { mag: 10048, sign: true }); + data.append(FP16x16 { mag: 101266, sign: false }); + data.append(FP16x16 { mag: 61859, sign: true }); + data.append(FP16x16 { mag: 21139, sign: true }); + data.append(FP16x16 { mag: 77012, sign: false }); + data.append(FP16x16 { mag: 10808, sign: false }); + data.append(FP16x16 { mag: 38568, sign: true }); + data.append(FP16x16 { mag: 117829, sign: false }); + data.append(FP16x16 { mag: 65427, sign: true }); + data.append(FP16x16 { mag: 32883, sign: false }); + data.append(FP16x16 { mag: 88408, sign: true }); + data.append(FP16x16 { mag: 30189, sign: true }); + data.append(FP16x16 { mag: 134656, sign: true }); + data.append(FP16x16 { mag: 63780, sign: true }); + data.append(FP16x16 { mag: 26968, sign: false }); + data.append(FP16x16 { mag: 17303, sign: false }); + data.append(FP16x16 { mag: 58933, sign: true }); + data.append(FP16x16 { mag: 22852, sign: false }); + data.append(FP16x16 { mag: 46707, sign: true }); + data.append(FP16x16 { mag: 44311, sign: true }); + 
data.append(FP16x16 { mag: 67011, sign: true }); + data.append(FP16x16 { mag: 17975, sign: true }); + data.append(FP16x16 { mag: 19940, sign: true }); + data.append(FP16x16 { mag: 26874, sign: false }); + data.append(FP16x16 { mag: 77148, sign: true }); + data.append(FP16x16 { mag: 1260, sign: false }); + data.append(FP16x16 { mag: 39435, sign: true }); + data.append(FP16x16 { mag: 57637, sign: true }); + data.append(FP16x16 { mag: 70584, sign: true }); + data.append(FP16x16 { mag: 90946, sign: false }); + data.append(FP16x16 { mag: 120636, sign: true }); + data.append(FP16x16 { mag: 44169, sign: false }); + data.append(FP16x16 { mag: 20822, sign: true }); + data.append(FP16x16 { mag: 9217, sign: true }); + data.append(FP16x16 { mag: 56642, sign: true }); + data.append(FP16x16 { mag: 32615, sign: false }); + data.append(FP16x16 { mag: 56213, sign: false }); + data.append(FP16x16 { mag: 20292, sign: true }); + data.append(FP16x16 { mag: 67358, sign: true }); + data.append(FP16x16 { mag: 94393, sign: true }); + data.append(FP16x16 { mag: 39525, sign: true }); + data.append(FP16x16 { mag: 117822, sign: false }); + data.append(FP16x16 { mag: 63929, sign: false }); + data.append(FP16x16 { mag: 2412, sign: true }); + data.append(FP16x16 { mag: 35782, sign: true }); + data.append(FP16x16 { mag: 42346, sign: true }); + data.append(FP16x16 { mag: 57619, sign: true }); + data.append(FP16x16 { mag: 76605, sign: false }); + data.append(FP16x16 { mag: 55629, sign: true }); + data.append(FP16x16 { mag: 26508, sign: true }); + data.append(FP16x16 { mag: 31496, sign: true }); + data.append(FP16x16 { mag: 29971, sign: false }); + data.append(FP16x16 { mag: 1317, sign: true }); + data.append(FP16x16 { mag: 86516, sign: true }); + data.append(FP16x16 { mag: 48878, sign: true }); + data.append(FP16x16 { mag: 91296, sign: true }); + data.append(FP16x16 { mag: 80537, sign: false }); + data.append(FP16x16 { mag: 143126, sign: true }); + data.append(FP16x16 { mag: 35843, sign: false }); + 
data.append(FP16x16 { mag: 794, sign: true }); + data.append(FP16x16 { mag: 4465, sign: true }); + data.append(FP16x16 { mag: 44772, sign: false }); + data.append(FP16x16 { mag: 114134, sign: false }); + data.append(FP16x16 { mag: 34012, sign: true }); + data.append(FP16x16 { mag: 81192, sign: false }); + data.append(FP16x16 { mag: 68824, sign: false }); + data.append(FP16x16 { mag: 35529, sign: true }); + data.append(FP16x16 { mag: 48725, sign: true }); + data.append(FP16x16 { mag: 48274, sign: true }); + data.append(FP16x16 { mag: 97602, sign: false }); + data.append(FP16x16 { mag: 18341, sign: true }); + data.append(FP16x16 { mag: 31756, sign: false }); + data.append(FP16x16 { mag: 79390, sign: false }); + data.append(FP16x16 { mag: 72973, sign: false }); + data.append(FP16x16 { mag: 1249, sign: true }); + data.append(FP16x16 { mag: 6551, sign: false }); + data.append(FP16x16 { mag: 41473, sign: false }); + data.append(FP16x16 { mag: 6590, sign: true }); + data.append(FP16x16 { mag: 31148, sign: true }); + data.append(FP16x16 { mag: 54083, sign: false }); + data.append(FP16x16 { mag: 11225, sign: false }); + data.append(FP16x16 { mag: 27239, sign: false }); + data.append(FP16x16 { mag: 26555, sign: false }); + data.append(FP16x16 { mag: 17820, sign: true }); + data.append(FP16x16 { mag: 122, sign: true }); + data.append(FP16x16 { mag: 67786, sign: false }); + data.append(FP16x16 { mag: 71452, sign: true }); + data.append(FP16x16 { mag: 84467, sign: true }); + data.append(FP16x16 { mag: 38713, sign: true }); + data.append(FP16x16 { mag: 84330, sign: false }); + data.append(FP16x16 { mag: 37483, sign: false }); + data.append(FP16x16 { mag: 93738, sign: false }); + data.append(FP16x16 { mag: 5609, sign: false }); + data.append(FP16x16 { mag: 32866, sign: true }); + data.append(FP16x16 { mag: 112546, sign: true }); + data.append(FP16x16 { mag: 109193, sign: true }); + data.append(FP16x16 { mag: 4021, sign: false }); + data.append(FP16x16 { mag: 57369, sign: false 
}); + data.append(FP16x16 { mag: 26880, sign: true }); + data.append(FP16x16 { mag: 28676, sign: false }); + data.append(FP16x16 { mag: 21352, sign: false }); + data.append(FP16x16 { mag: 19899, sign: false }); + data.append(FP16x16 { mag: 25250, sign: false }); + data.append(FP16x16 { mag: 17564, sign: false }); + data.append(FP16x16 { mag: 73191, sign: true }); + data.append(FP16x16 { mag: 15444, sign: false }); + data.append(FP16x16 { mag: 3166, sign: false }); + data.append(FP16x16 { mag: 58258, sign: true }); + data.append(FP16x16 { mag: 20879, sign: true }); + data.append(FP16x16 { mag: 39604, sign: false }); + data.append(FP16x16 { mag: 81651, sign: true }); + data.append(FP16x16 { mag: 91451, sign: false }); + data.append(FP16x16 { mag: 58072, sign: false }); + data.append(FP16x16 { mag: 131524, sign: false }); + data.append(FP16x16 { mag: 32727, sign: false }); + data.append(FP16x16 { mag: 165314, sign: false }); + data.append(FP16x16 { mag: 4141, sign: true }); + data.append(FP16x16 { mag: 14668, sign: false }); + data.append(FP16x16 { mag: 124609, sign: false }); + data.append(FP16x16 { mag: 33880, sign: false }); + data.append(FP16x16 { mag: 180279, sign: false }); + data.append(FP16x16 { mag: 23806, sign: true }); + data.append(FP16x16 { mag: 33978, sign: false }); + data.append(FP16x16 { mag: 102278, sign: false }); + data.append(FP16x16 { mag: 91608, sign: true }); + data.append(FP16x16 { mag: 81589, sign: false }); + data.append(FP16x16 { mag: 55843, sign: true }); + data.append(FP16x16 { mag: 70024, sign: true }); + data.append(FP16x16 { mag: 30011, sign: true }); + data.append(FP16x16 { mag: 18673, sign: false }); + data.append(FP16x16 { mag: 17510, sign: true }); + data.append(FP16x16 { mag: 94591, sign: true }); + data.append(FP16x16 { mag: 83587, sign: true }); + data.append(FP16x16 { mag: 80327, sign: true }); + data.append(FP16x16 { mag: 58704, sign: true }); + data.append(FP16x16 { mag: 12647, sign: true }); + data.append(FP16x16 { mag: 
119066, sign: false }); + data.append(FP16x16 { mag: 24450, sign: true }); + data.append(FP16x16 { mag: 73352, sign: true }); + data.append(FP16x16 { mag: 37058, sign: false }); + data.append(FP16x16 { mag: 21557, sign: false }); + data.append(FP16x16 { mag: 20631, sign: true }); + data.append(FP16x16 { mag: 93230, sign: false }); + data.append(FP16x16 { mag: 49743, sign: true }); + data.append(FP16x16 { mag: 33398, sign: false }); + data.append(FP16x16 { mag: 30495, sign: false }); + data.append(FP16x16 { mag: 69280, sign: true }); + data.append(FP16x16 { mag: 11759, sign: true }); + data.append(FP16x16 { mag: 87054, sign: false }); + data.append(FP16x16 { mag: 66395, sign: false }); + data.append(FP16x16 { mag: 20819, sign: false }); + data.append(FP16x16 { mag: 107561, sign: false }); + data.append(FP16x16 { mag: 24839, sign: false }); + data.append(FP16x16 { mag: 101166, sign: true }); + data.append(FP16x16 { mag: 4674, sign: true }); + data.append(FP16x16 { mag: 15294, sign: false }); + data.append(FP16x16 { mag: 14943, sign: false }); + data.append(FP16x16 { mag: 7494, sign: false }); + data.append(FP16x16 { mag: 83926, sign: false }); + data.append(FP16x16 { mag: 127871, sign: true }); + data.append(FP16x16 { mag: 87804, sign: false }); + data.append(FP16x16 { mag: 79905, sign: false }); + data.append(FP16x16 { mag: 82486, sign: false }); + data.append(FP16x16 { mag: 23065, sign: true }); + data.append(FP16x16 { mag: 14517, sign: false }); + data.append(FP16x16 { mag: 146450, sign: true }); + data.append(FP16x16 { mag: 78522, sign: true }); + data.append(FP16x16 { mag: 50973, sign: false }); + data.append(FP16x16 { mag: 59565, sign: false }); + data.append(FP16x16 { mag: 75649, sign: false }); + data.append(FP16x16 { mag: 37180, sign: false }); + data.append(FP16x16 { mag: 10656, sign: true }); + data.append(FP16x16 { mag: 20803, sign: false }); + data.append(FP16x16 { mag: 19563, sign: true }); + data.append(FP16x16 { mag: 27034, sign: false }); + 
data.append(FP16x16 { mag: 14241, sign: true }); + data.append(FP16x16 { mag: 27342, sign: false }); + data.append(FP16x16 { mag: 15449, sign: false }); + data.append(FP16x16 { mag: 54417, sign: false }); + data.append(FP16x16 { mag: 28698, sign: false }); + data.append(FP16x16 { mag: 10291, sign: false }); + data.append(FP16x16 { mag: 78630, sign: true }); + data.append(FP16x16 { mag: 54471, sign: false }); + data.append(FP16x16 { mag: 63909, sign: false }); + data.append(FP16x16 { mag: 155625, sign: true }); + data.append(FP16x16 { mag: 46452, sign: true }); + data.append(FP16x16 { mag: 85081, sign: true }); + data.append(FP16x16 { mag: 149800, sign: true }); + data.append(FP16x16 { mag: 83814, sign: true }); + data.append(FP16x16 { mag: 13220, sign: true }); + data.append(FP16x16 { mag: 105580, sign: true }); + data.append(FP16x16 { mag: 45775, sign: false }); + data.append(FP16x16 { mag: 81027, sign: false }); + data.append(FP16x16 { mag: 45746, sign: false }); + data.append(FP16x16 { mag: 20454, sign: true }); + data.append(FP16x16 { mag: 54633, sign: false }); + data.append(FP16x16 { mag: 58097, sign: false }); + data.append(FP16x16 { mag: 51893, sign: true }); + data.append(FP16x16 { mag: 537, sign: false }); + data.append(FP16x16 { mag: 47389, sign: false }); + data.append(FP16x16 { mag: 7725, sign: false }); + data.append(FP16x16 { mag: 64559, sign: false }); + data.append(FP16x16 { mag: 120617, sign: true }); + data.append(FP16x16 { mag: 13069, sign: true }); + data.append(FP16x16 { mag: 13012, sign: false }); + data.append(FP16x16 { mag: 64929, sign: false }); + data.append(FP16x16 { mag: 13880, sign: true }); + data.append(FP16x16 { mag: 29696, sign: false }); + data.append(FP16x16 { mag: 23484, sign: true }); + data.append(FP16x16 { mag: 78538, sign: true }); + data.append(FP16x16 { mag: 108676, sign: true }); + data.append(FP16x16 { mag: 30019, sign: false }); + data.append(FP16x16 { mag: 17931, sign: true }); + data.append(FP16x16 { mag: 27085, sign: 
false }); + data.append(FP16x16 { mag: 24850, sign: false }); + data.append(FP16x16 { mag: 12153, sign: false }); + data.append(FP16x16 { mag: 11538, sign: true }); + data.append(FP16x16 { mag: 69066, sign: true }); + data.append(FP16x16 { mag: 12725, sign: false }); + data.append(FP16x16 { mag: 37986, sign: false }); + data.append(FP16x16 { mag: 53389, sign: true }); + data.append(FP16x16 { mag: 114518, sign: true }); + data.append(FP16x16 { mag: 74200, sign: true }); + data.append(FP16x16 { mag: 54403, sign: true }); + data.append(FP16x16 { mag: 31396, sign: false }); + data.append(FP16x16 { mag: 40194, sign: false }); + data.append(FP16x16 { mag: 9643, sign: false }); + data.append(FP16x16 { mag: 29485, sign: true }); + data.append(FP16x16 { mag: 12679, sign: false }); + data.append(FP16x16 { mag: 15850, sign: true }); + data.append(FP16x16 { mag: 39421, sign: true }); + data.append(FP16x16 { mag: 88854, sign: false }); + data.append(FP16x16 { mag: 1130, sign: false }); + data.append(FP16x16 { mag: 16366, sign: true }); + data.append(FP16x16 { mag: 33087, sign: true }); + data.append(FP16x16 { mag: 28745, sign: true }); + data.append(FP16x16 { mag: 3488, sign: false }); + data.append(FP16x16 { mag: 59008, sign: true }); + data.append(FP16x16 { mag: 4364, sign: false }); + data.append(FP16x16 { mag: 42280, sign: true }); + data.append(FP16x16 { mag: 100073, sign: true }); + data.append(FP16x16 { mag: 4958, sign: true }); + data.append(FP16x16 { mag: 37532, sign: true }); + data.append(FP16x16 { mag: 30930, sign: false }); + data.append(FP16x16 { mag: 29580, sign: true }); + data.append(FP16x16 { mag: 930, sign: true }); + data.append(FP16x16 { mag: 4486, sign: true }); + data.append(FP16x16 { mag: 82232, sign: true }); + data.append(FP16x16 { mag: 47028, sign: true }); + data.append(FP16x16 { mag: 8929, sign: false }); + data.append(FP16x16 { mag: 70968, sign: false }); + data.append(FP16x16 { mag: 38614, sign: true }); + data.append(FP16x16 { mag: 57990, sign: 
true }); + data.append(FP16x16 { mag: 51540, sign: true }); + data.append(FP16x16 { mag: 15430, sign: true }); + data.append(FP16x16 { mag: 60366, sign: true }); + data.append(FP16x16 { mag: 26896, sign: false }); + data.append(FP16x16 { mag: 55881, sign: false }); + data.append(FP16x16 { mag: 37730, sign: false }); + data.append(FP16x16 { mag: 1519, sign: true }); + data.append(FP16x16 { mag: 41072, sign: true }); + data.append(FP16x16 { mag: 70626, sign: true }); + data.append(FP16x16 { mag: 80140, sign: false }); + data.append(FP16x16 { mag: 48666, sign: true }); + data.append(FP16x16 { mag: 63754, sign: true }); + data.append(FP16x16 { mag: 64494, sign: true }); + data.append(FP16x16 { mag: 75672, sign: false }); + data.append(FP16x16 { mag: 39394, sign: true }); + data.append(FP16x16 { mag: 39881, sign: false }); + data.append(FP16x16 { mag: 23064, sign: true }); + data.append(FP16x16 { mag: 14821, sign: false }); + data.append(FP16x16 { mag: 27344, sign: true }); + data.append(FP16x16 { mag: 14728, sign: false }); + data.append(FP16x16 { mag: 59112, sign: true }); + data.append(FP16x16 { mag: 93610, sign: false }); + data.append(FP16x16 { mag: 80804, sign: true }); + data.append(FP16x16 { mag: 31174, sign: false }); + data.append(FP16x16 { mag: 31687, sign: true }); + data.append(FP16x16 { mag: 92103, sign: false }); + data.append(FP16x16 { mag: 84394, sign: false }); + data.append(FP16x16 { mag: 85999, sign: true }); + data.append(FP16x16 { mag: 93667, sign: true }); + data.append(FP16x16 { mag: 124722, sign: false }); + data.append(FP16x16 { mag: 29792, sign: true }); + data.append(FP16x16 { mag: 1719, sign: true }); + data.append(FP16x16 { mag: 10265, sign: false }); + data.append(FP16x16 { mag: 98163, sign: false }); + data.append(FP16x16 { mag: 44203, sign: false }); + data.append(FP16x16 { mag: 25672, sign: true }); + data.append(FP16x16 { mag: 92499, sign: false }); + data.append(FP16x16 { mag: 71127, sign: false }); + data.append(FP16x16 { mag: 
107167, sign: true }); + data.append(FP16x16 { mag: 58795, sign: false }); + data.append(FP16x16 { mag: 33710, sign: false }); + data.append(FP16x16 { mag: 59629, sign: true }); + data.append(FP16x16 { mag: 12862, sign: false }); + data.append(FP16x16 { mag: 77599, sign: true }); + data.append(FP16x16 { mag: 2634, sign: false }); + data.append(FP16x16 { mag: 30404, sign: true }); + data.append(FP16x16 { mag: 34664, sign: false }); + data.append(FP16x16 { mag: 123236, sign: false }); + data.append(FP16x16 { mag: 9552, sign: false }); + data.append(FP16x16 { mag: 1400, sign: true }); + data.append(FP16x16 { mag: 57199, sign: true }); + data.append(FP16x16 { mag: 56490, sign: false }); + data.append(FP16x16 { mag: 15372, sign: false }); + data.append(FP16x16 { mag: 39428, sign: true }); + data.append(FP16x16 { mag: 22812, sign: true }); + data.append(FP16x16 { mag: 131564, sign: false }); + data.append(FP16x16 { mag: 119458, sign: true }); + data.append(FP16x16 { mag: 45170, sign: true }); + data.append(FP16x16 { mag: 59265, sign: false }); + data.append(FP16x16 { mag: 45955, sign: true }); + data.append(FP16x16 { mag: 54566, sign: true }); + data.append(FP16x16 { mag: 38354, sign: false }); + data.append(FP16x16 { mag: 138628, sign: true }); + data.append(FP16x16 { mag: 54243, sign: true }); + data.append(FP16x16 { mag: 47436, sign: false }); + data.append(FP16x16 { mag: 58880, sign: true }); + data.append(FP16x16 { mag: 48381, sign: false }); + data.append(FP16x16 { mag: 33455, sign: false }); + data.append(FP16x16 { mag: 38625, sign: false }); + data.append(FP16x16 { mag: 107342, sign: true }); + data.append(FP16x16 { mag: 46976, sign: false }); + data.append(FP16x16 { mag: 84206, sign: true }); + data.append(FP16x16 { mag: 29355, sign: false }); + data.append(FP16x16 { mag: 63176, sign: false }); + data.append(FP16x16 { mag: 34330, sign: true }); + data.append(FP16x16 { mag: 65025, sign: false }); + data.append(FP16x16 { mag: 25751, sign: true }); + 
data.append(FP16x16 { mag: 17809, sign: false }); + data.append(FP16x16 { mag: 22890, sign: false }); + data.append(FP16x16 { mag: 57565, sign: true }); + data.append(FP16x16 { mag: 16208, sign: false }); + data.append(FP16x16 { mag: 134370, sign: true }); + data.append(FP16x16 { mag: 58594, sign: false }); + data.append(FP16x16 { mag: 11743, sign: true }); + data.append(FP16x16 { mag: 44532, sign: true }); + data.append(FP16x16 { mag: 117474, sign: true }); + data.append(FP16x16 { mag: 44774, sign: false }); + data.append(FP16x16 { mag: 120360, sign: false }); + data.append(FP16x16 { mag: 28815, sign: false }); + data.append(FP16x16 { mag: 9241, sign: true }); + data.append(FP16x16 { mag: 25010, sign: false }); + data.append(FP16x16 { mag: 26794, sign: true }); + data.append(FP16x16 { mag: 36699, sign: true }); + data.append(FP16x16 { mag: 1476, sign: true }); + data.append(FP16x16 { mag: 39151, sign: true }); + data.append(FP16x16 { mag: 55160, sign: false }); + data.append(FP16x16 { mag: 51197, sign: false }); + data.append(FP16x16 { mag: 41092, sign: false }); + data.append(FP16x16 { mag: 63983, sign: true }); + data.append(FP16x16 { mag: 48252, sign: false }); + data.append(FP16x16 { mag: 51458, sign: false }); + data.append(FP16x16 { mag: 28035, sign: true }); + data.append(FP16x16 { mag: 93421, sign: false }); + data.append(FP16x16 { mag: 46679, sign: true }); + data.append(FP16x16 { mag: 81159, sign: false }); + data.append(FP16x16 { mag: 22207, sign: false }); + data.append(FP16x16 { mag: 24918, sign: false }); + data.append(FP16x16 { mag: 50366, sign: true }); + data.append(FP16x16 { mag: 38928, sign: true }); + data.append(FP16x16 { mag: 2326, sign: true }); + data.append(FP16x16 { mag: 41029, sign: true }); + data.append(FP16x16 { mag: 48971, sign: false }); + data.append(FP16x16 { mag: 102185, sign: true }); + data.append(FP16x16 { mag: 45825, sign: true }); + data.append(FP16x16 { mag: 6231, sign: true }); + data.append(FP16x16 { mag: 5992, sign: true 
}); + data.append(FP16x16 { mag: 60752, sign: false }); + data.append(FP16x16 { mag: 44154, sign: false }); + data.append(FP16x16 { mag: 58826, sign: true }); + data.append(FP16x16 { mag: 70712, sign: true }); + data.append(FP16x16 { mag: 16729, sign: true }); + data.append(FP16x16 { mag: 122822, sign: true }); + data.append(FP16x16 { mag: 5531, sign: true }); + data.append(FP16x16 { mag: 125650, sign: false }); + data.append(FP16x16 { mag: 116267, sign: false }); + data.append(FP16x16 { mag: 36107, sign: false }); + data.append(FP16x16 { mag: 28157, sign: true }); + data.append(FP16x16 { mag: 39470, sign: false }); + data.append(FP16x16 { mag: 47628, sign: false }); + data.append(FP16x16 { mag: 22813, sign: false }); + data.append(FP16x16 { mag: 48005, sign: false }); + data.append(FP16x16 { mag: 2843, sign: false }); + data.append(FP16x16 { mag: 65208, sign: false }); + data.append(FP16x16 { mag: 42277, sign: false }); + data.append(FP16x16 { mag: 18706, sign: true }); + data.append(FP16x16 { mag: 30606, sign: true }); + data.append(FP16x16 { mag: 27669, sign: true }); + data.append(FP16x16 { mag: 38065, sign: false }); + data.append(FP16x16 { mag: 11748, sign: false }); + data.append(FP16x16 { mag: 37583, sign: true }); + data.append(FP16x16 { mag: 37178, sign: false }); + data.append(FP16x16 { mag: 45096, sign: false }); + data.append(FP16x16 { mag: 120975, sign: true }); + data.append(FP16x16 { mag: 42732, sign: false }); + data.append(FP16x16 { mag: 87833, sign: false }); + data.append(FP16x16 { mag: 25683, sign: true }); + data.append(FP16x16 { mag: 57618, sign: true }); + data.append(FP16x16 { mag: 23177, sign: false }); + data.append(FP16x16 { mag: 5134, sign: true }); + data.append(FP16x16 { mag: 7581, sign: true }); + data.append(FP16x16 { mag: 75724, sign: true }); + data.append(FP16x16 { mag: 64458, sign: true }); + data.append(FP16x16 { mag: 141791, sign: true }); + data.append(FP16x16 { mag: 42154, sign: true }); + data.append(FP16x16 { mag: 12865, 
sign: false }); + data.append(FP16x16 { mag: 17234, sign: true }); + data.append(FP16x16 { mag: 126303, sign: true }); + data.append(FP16x16 { mag: 93382, sign: true }); + data.append(FP16x16 { mag: 57375, sign: false }); + data.append(FP16x16 { mag: 75818, sign: false }); + data.append(FP16x16 { mag: 5159, sign: false }); + data.append(FP16x16 { mag: 35848, sign: false }); + data.append(FP16x16 { mag: 4853, sign: false }); + data.append(FP16x16 { mag: 57301, sign: true }); + data.append(FP16x16 { mag: 8128, sign: true }); + data.append(FP16x16 { mag: 83696, sign: true }); + data.append(FP16x16 { mag: 136686, sign: false }); + data.append(FP16x16 { mag: 112560, sign: true }); + data.append(FP16x16 { mag: 66658, sign: false }); + data.append(FP16x16 { mag: 28200, sign: true }); + data.append(FP16x16 { mag: 40036, sign: false }); + data.append(FP16x16 { mag: 31169, sign: true }); + data.append(FP16x16 { mag: 174668, sign: false }); + data.append(FP16x16 { mag: 29868, sign: false }); + data.append(FP16x16 { mag: 10093, sign: true }); + data.append(FP16x16 { mag: 74999, sign: false }); + data.append(FP16x16 { mag: 78237, sign: true }); + data.append(FP16x16 { mag: 127676, sign: true }); + data.append(FP16x16 { mag: 22293, sign: true }); + data.append(FP16x16 { mag: 1758, sign: false }); + data.append(FP16x16 { mag: 56726, sign: false }); + data.append(FP16x16 { mag: 133433, sign: true }); + data.append(FP16x16 { mag: 9554, sign: true }); + data.append(FP16x16 { mag: 74809, sign: true }); + data.append(FP16x16 { mag: 17296, sign: true }); + data.append(FP16x16 { mag: 33106, sign: false }); + data.append(FP16x16 { mag: 57029, sign: true }); + data.append(FP16x16 { mag: 67553, sign: false }); + data.append(FP16x16 { mag: 65247, sign: false }); + data.append(FP16x16 { mag: 33811, sign: false }); + data.append(FP16x16 { mag: 37820, sign: false }); + data.append(FP16x16 { mag: 8349, sign: false }); + data.append(FP16x16 { mag: 82711, sign: true }); + data.append(FP16x16 { 
mag: 6355, sign: true }); + data.append(FP16x16 { mag: 51759, sign: true }); + data.append(FP16x16 { mag: 90465, sign: false }); + data.append(FP16x16 { mag: 63747, sign: false }); + data.append(FP16x16 { mag: 51827, sign: false }); + data.append(FP16x16 { mag: 2074, sign: true }); + data.append(FP16x16 { mag: 7986, sign: true }); + data.append(FP16x16 { mag: 127840, sign: true }); + data.append(FP16x16 { mag: 9095, sign: true }); + data.append(FP16x16 { mag: 39166, sign: true }); + data.append(FP16x16 { mag: 4900, sign: false }); + data.append(FP16x16 { mag: 85400, sign: false }); + data.append(FP16x16 { mag: 20179, sign: true }); + data.append(FP16x16 { mag: 102313, sign: false }); + data.append(FP16x16 { mag: 30560, sign: true }); + data.append(FP16x16 { mag: 23998, sign: true }); + data.append(FP16x16 { mag: 18546, sign: false }); + data.append(FP16x16 { mag: 50624, sign: false }); + data.append(FP16x16 { mag: 28484, sign: false }); + data.append(FP16x16 { mag: 106711, sign: false }); + data.append(FP16x16 { mag: 145610, sign: true }); + data.append(FP16x16 { mag: 45143, sign: true }); + data.append(FP16x16 { mag: 49679, sign: true }); + data.append(FP16x16 { mag: 43769, sign: true }); + data.append(FP16x16 { mag: 70976, sign: false }); + data.append(FP16x16 { mag: 97200, sign: false }); + data.append(FP16x16 { mag: 51404, sign: true }); + data.append(FP16x16 { mag: 5413, sign: false }); + data.append(FP16x16 { mag: 16219, sign: true }); + data.append(FP16x16 { mag: 68769, sign: false }); + data.append(FP16x16 { mag: 14687, sign: true }); + data.append(FP16x16 { mag: 24476, sign: false }); + data.append(FP16x16 { mag: 50297, sign: true }); + data.append(FP16x16 { mag: 21305, sign: true }); + data.append(FP16x16 { mag: 63960, sign: false }); + data.append(FP16x16 { mag: 60809, sign: false }); + data.append(FP16x16 { mag: 28337, sign: true }); + data.append(FP16x16 { mag: 14402, sign: true }); + data.append(FP16x16 { mag: 118874, sign: true }); + 
data.append(FP16x16 { mag: 76081, sign: false }); + data.append(FP16x16 { mag: 72871, sign: false }); + data.append(FP16x16 { mag: 25750, sign: true }); + data.append(FP16x16 { mag: 9294, sign: true }); + data.append(FP16x16 { mag: 113450, sign: false }); + data.append(FP16x16 { mag: 11901, sign: false }); + data.append(FP16x16 { mag: 130175, sign: true }); + data.append(FP16x16 { mag: 78751, sign: false }); + data.append(FP16x16 { mag: 37596, sign: false }); + data.append(FP16x16 { mag: 15550, sign: true }); + data.append(FP16x16 { mag: 88187, sign: false }); + data.append(FP16x16 { mag: 39156, sign: false }); + data.append(FP16x16 { mag: 105270, sign: false }); + data.append(FP16x16 { mag: 17054, sign: false }); + data.append(FP16x16 { mag: 58134, sign: false }); + data.append(FP16x16 { mag: 81353, sign: false }); + data.append(FP16x16 { mag: 54805, sign: true }); + data.append(FP16x16 { mag: 17538, sign: false }); + data.append(FP16x16 { mag: 78980, sign: false }); + data.append(FP16x16 { mag: 198738, sign: false }); + data.append(FP16x16 { mag: 19403, sign: true }); + data.append(FP16x16 { mag: 34265, sign: false }); + data.append(FP16x16 { mag: 11007, sign: false }); + data.append(FP16x16 { mag: 28307, sign: false }); + data.append(FP16x16 { mag: 119920, sign: true }); + data.append(FP16x16 { mag: 26409, sign: false }); + data.append(FP16x16 { mag: 23076, sign: false }); + data.append(FP16x16 { mag: 34211, sign: false }); + data.append(FP16x16 { mag: 361, sign: true }); + data.append(FP16x16 { mag: 73745, sign: true }); + data.append(FP16x16 { mag: 7717, sign: true }); + data.append(FP16x16 { mag: 12468, sign: false }); + data.append(FP16x16 { mag: 25976, sign: false }); + data.append(FP16x16 { mag: 107153, sign: true }); + data.append(FP16x16 { mag: 68883, sign: false }); + data.append(FP16x16 { mag: 13933, sign: true }); + data.append(FP16x16 { mag: 79850, sign: false }); + data.append(FP16x16 { mag: 77337, sign: true }); + data.append(FP16x16 { mag: 27318, 
sign: true }); + data.append(FP16x16 { mag: 103120, sign: false }); + data.append(FP16x16 { mag: 12693, sign: true }); + data.append(FP16x16 { mag: 63090, sign: false }); + data.append(FP16x16 { mag: 45507, sign: false }); + data.append(FP16x16 { mag: 52553, sign: true }); + data.append(FP16x16 { mag: 70902, sign: false }); + data.append(FP16x16 { mag: 111650, sign: false }); + data.append(FP16x16 { mag: 23223, sign: false }); + data.append(FP16x16 { mag: 6272, sign: true }); + data.append(FP16x16 { mag: 7037, sign: false }); + data.append(FP16x16 { mag: 74819, sign: true }); + data.append(FP16x16 { mag: 68730, sign: false }); + data.append(FP16x16 { mag: 90482, sign: false }); + data.append(FP16x16 { mag: 16560, sign: true }); + data.append(FP16x16 { mag: 117248, sign: false }); + data.append(FP16x16 { mag: 80344, sign: false }); + data.append(FP16x16 { mag: 73022, sign: true }); + data.append(FP16x16 { mag: 33531, sign: false }); + data.append(FP16x16 { mag: 180033, sign: false }); + data.append(FP16x16 { mag: 46630, sign: true }); + data.append(FP16x16 { mag: 119335, sign: true }); + data.append(FP16x16 { mag: 20067, sign: true }); + data.append(FP16x16 { mag: 36506, sign: false }); + data.append(FP16x16 { mag: 24262, sign: true }); + data.append(FP16x16 { mag: 715, sign: true }); + data.append(FP16x16 { mag: 101984, sign: false }); + data.append(FP16x16 { mag: 22353, sign: false }); + data.append(FP16x16 { mag: 50986, sign: false }); + data.append(FP16x16 { mag: 67113, sign: false }); + data.append(FP16x16 { mag: 55868, sign: false }); + data.append(FP16x16 { mag: 100422, sign: true }); + data.append(FP16x16 { mag: 53193, sign: false }); + data.append(FP16x16 { mag: 51699, sign: false }); + data.append(FP16x16 { mag: 47894, sign: true }); + data.append(FP16x16 { mag: 8082, sign: true }); + data.append(FP16x16 { mag: 32263, sign: true }); + data.append(FP16x16 { mag: 27212, sign: true }); + data.append(FP16x16 { mag: 7226, sign: true }); + data.append(FP16x16 { 
mag: 30746, sign: false }); + data.append(FP16x16 { mag: 12011, sign: false }); + data.append(FP16x16 { mag: 13020, sign: false }); + data.append(FP16x16 { mag: 7085, sign: false }); + data.append(FP16x16 { mag: 18086, sign: true }); + data.append(FP16x16 { mag: 30286, sign: false }); + data.append(FP16x16 { mag: 20925, sign: true }); + data.append(FP16x16 { mag: 87195, sign: true }); + data.append(FP16x16 { mag: 42790, sign: false }); + data.append(FP16x16 { mag: 8451, sign: false }); + data.append(FP16x16 { mag: 91114, sign: true }); + data.append(FP16x16 { mag: 32647, sign: true }); + data.append(FP16x16 { mag: 64992, sign: true }); + data.append(FP16x16 { mag: 74108, sign: false }); + data.append(FP16x16 { mag: 15790, sign: true }); + data.append(FP16x16 { mag: 80038, sign: true }); + data.append(FP16x16 { mag: 34209, sign: true }); + data.append(FP16x16 { mag: 7672, sign: false }); + data.append(FP16x16 { mag: 27429, sign: false }); + data.append(FP16x16 { mag: 57827, sign: false }); + data.append(FP16x16 { mag: 2154, sign: true }); + data.append(FP16x16 { mag: 44183, sign: false }); + data.append(FP16x16 { mag: 60736, sign: false }); + data.append(FP16x16 { mag: 62972, sign: false }); + data.append(FP16x16 { mag: 10, sign: true }); + data.append(FP16x16 { mag: 30567, sign: true }); + data.append(FP16x16 { mag: 58169, sign: false }); + data.append(FP16x16 { mag: 113162, sign: true }); + data.append(FP16x16 { mag: 47093, sign: true }); + data.append(FP16x16 { mag: 11549, sign: false }); + data.append(FP16x16 { mag: 103543, sign: true }); + data.append(FP16x16 { mag: 19276, sign: true }); + data.append(FP16x16 { mag: 43892, sign: false }); + data.append(FP16x16 { mag: 28123, sign: false }); + data.append(FP16x16 { mag: 94676, sign: false }); + data.append(FP16x16 { mag: 85149, sign: true }); + data.append(FP16x16 { mag: 26867, sign: true }); + data.append(FP16x16 { mag: 112263, sign: true }); + data.append(FP16x16 { mag: 24373, sign: true }); + 
data.append(FP16x16 { mag: 34755, sign: false }); + data.append(FP16x16 { mag: 9384, sign: false }); + data.append(FP16x16 { mag: 49546, sign: true }); + data.append(FP16x16 { mag: 79711, sign: false }); + data.append(FP16x16 { mag: 27758, sign: true }); + data.append(FP16x16 { mag: 8378, sign: true }); + data.append(FP16x16 { mag: 40580, sign: false }); + data.append(FP16x16 { mag: 112339, sign: true }); + data.append(FP16x16 { mag: 10388, sign: true }); + data.append(FP16x16 { mag: 37733, sign: false }); + data.append(FP16x16 { mag: 105370, sign: false }); + data.append(FP16x16 { mag: 38217, sign: true }); + data.append(FP16x16 { mag: 52641, sign: true }); + data.append(FP16x16 { mag: 33139, sign: true }); + data.append(FP16x16 { mag: 8245, sign: true }); + data.append(FP16x16 { mag: 2784, sign: false }); + data.append(FP16x16 { mag: 113567, sign: false }); + data.append(FP16x16 { mag: 104763, sign: false }); + data.append(FP16x16 { mag: 4564, sign: false }); + data.append(FP16x16 { mag: 15700, sign: false }); + data.append(FP16x16 { mag: 43737, sign: false }); + data.append(FP16x16 { mag: 34793, sign: false }); + data.append(FP16x16 { mag: 57257, sign: true }); + data.append(FP16x16 { mag: 132151, sign: true }); + data.append(FP16x16 { mag: 45201, sign: true }); + data.append(FP16x16 { mag: 98318, sign: false }); + data.append(FP16x16 { mag: 106443, sign: true }); + data.append(FP16x16 { mag: 34909, sign: true }); + data.append(FP16x16 { mag: 32335, sign: true }); + data.append(FP16x16 { mag: 40192, sign: true }); + data.append(FP16x16 { mag: 22349, sign: false }); + data.append(FP16x16 { mag: 49484, sign: true }); + data.append(FP16x16 { mag: 10589, sign: true }); + data.append(FP16x16 { mag: 7125, sign: true }); + data.append(FP16x16 { mag: 104325, sign: true }); + data.append(FP16x16 { mag: 41763, sign: false }); + data.append(FP16x16 { mag: 38116, sign: false }); + data.append(FP16x16 { mag: 84951, sign: true }); + data.append(FP16x16 { mag: 52756, sign: 
false }); + data.append(FP16x16 { mag: 60485, sign: false }); + data.append(FP16x16 { mag: 90227, sign: false }); + data.append(FP16x16 { mag: 12616, sign: false }); + data.append(FP16x16 { mag: 94036, sign: false }); + data.append(FP16x16 { mag: 60362, sign: true }); + data.append(FP16x16 { mag: 6930, sign: false }); + data.append(FP16x16 { mag: 42650, sign: true }); + data.append(FP16x16 { mag: 57435, sign: false }); + data.append(FP16x16 { mag: 8276, sign: true }); + data.append(FP16x16 { mag: 36396, sign: true }); + data.append(FP16x16 { mag: 157288, sign: false }); + data.append(FP16x16 { mag: 3531, sign: true }); + data.append(FP16x16 { mag: 21519, sign: true }); + data.append(FP16x16 { mag: 1592, sign: true }); + data.append(FP16x16 { mag: 41150, sign: true }); + data.append(FP16x16 { mag: 73234, sign: false }); + data.append(FP16x16 { mag: 66372, sign: false }); + data.append(FP16x16 { mag: 58146, sign: false }); + data.append(FP16x16 { mag: 44555, sign: false }); + data.append(FP16x16 { mag: 35804, sign: true }); + data.append(FP16x16 { mag: 2150, sign: false }); + data.append(FP16x16 { mag: 55208, sign: false }); + data.append(FP16x16 { mag: 41980, sign: true }); + data.append(FP16x16 { mag: 66322, sign: true }); + data.append(FP16x16 { mag: 26460, sign: false }); + data.append(FP16x16 { mag: 77380, sign: false }); + data.append(FP16x16 { mag: 125207, sign: true }); + data.append(FP16x16 { mag: 30086, sign: true }); + data.append(FP16x16 { mag: 15517, sign: false }); + data.append(FP16x16 { mag: 118773, sign: false }); + data.append(FP16x16 { mag: 99314, sign: true }); + data.append(FP16x16 { mag: 11067, sign: true }); + data.append(FP16x16 { mag: 57066, sign: false }); + data.append(FP16x16 { mag: 5311, sign: false }); + data.append(FP16x16 { mag: 69970, sign: false }); + data.append(FP16x16 { mag: 93820, sign: false }); + data.append(FP16x16 { mag: 1079, sign: true }); + data.append(FP16x16 { mag: 9773, sign: false }); + data.append(FP16x16 { mag: 
21840, sign: false }); + data.append(FP16x16 { mag: 2160, sign: true }); + data.append(FP16x16 { mag: 3271, sign: true }); + data.append(FP16x16 { mag: 99495, sign: true }); + data.append(FP16x16 { mag: 77215, sign: true }); + data.append(FP16x16 { mag: 85212, sign: false }); + data.append(FP16x16 { mag: 61944, sign: false }); + data.append(FP16x16 { mag: 2082, sign: false }); + data.append(FP16x16 { mag: 107676, sign: true }); + data.append(FP16x16 { mag: 40702, sign: false }); + data.append(FP16x16 { mag: 24223, sign: false }); + data.append(FP16x16 { mag: 82086, sign: true }); + data.append(FP16x16 { mag: 1236, sign: false }); + data.append(FP16x16 { mag: 65294, sign: false }); + data.append(FP16x16 { mag: 18639, sign: true }); + data.append(FP16x16 { mag: 81036, sign: false }); + data.append(FP16x16 { mag: 1472, sign: false }); + data.append(FP16x16 { mag: 26208, sign: true }); + data.append(FP16x16 { mag: 7969, sign: false }); + data.append(FP16x16 { mag: 94838, sign: false }); + data.append(FP16x16 { mag: 48398, sign: true }); + data.append(FP16x16 { mag: 127750, sign: false }); + data.append(FP16x16 { mag: 14694, sign: false }); + data.append(FP16x16 { mag: 98356, sign: false }); + data.append(FP16x16 { mag: 16239, sign: false }); + data.append(FP16x16 { mag: 20463, sign: false }); + data.append(FP16x16 { mag: 125215, sign: false }); + data.append(FP16x16 { mag: 8089, sign: true }); + data.append(FP16x16 { mag: 114956, sign: true }); + data.append(FP16x16 { mag: 56287, sign: false }); + data.append(FP16x16 { mag: 11168, sign: true }); + data.append(FP16x16 { mag: 85072, sign: true }); + data.append(FP16x16 { mag: 53241, sign: true }); + data.append(FP16x16 { mag: 47712, sign: false }); + data.append(FP16x16 { mag: 27321, sign: false }); + data.append(FP16x16 { mag: 2647, sign: false }); + data.append(FP16x16 { mag: 64711, sign: true }); + data.append(FP16x16 { mag: 8104, sign: true }); + data.append(FP16x16 { mag: 5213, sign: false }); + data.append(FP16x16 
{ mag: 87049, sign: true }); + data.append(FP16x16 { mag: 41663, sign: false }); + data.append(FP16x16 { mag: 26688, sign: true }); + data.append(FP16x16 { mag: 2385, sign: true }); + data.append(FP16x16 { mag: 8540, sign: true }); + data.append(FP16x16 { mag: 7311, sign: true }); + data.append(FP16x16 { mag: 2750, sign: true }); + data.append(FP16x16 { mag: 125527, sign: true }); + data.append(FP16x16 { mag: 51192, sign: false }); + data.append(FP16x16 { mag: 102959, sign: false }); + data.append(FP16x16 { mag: 94206, sign: false }); + data.append(FP16x16 { mag: 22114, sign: false }); + data.append(FP16x16 { mag: 56998, sign: false }); + data.append(FP16x16 { mag: 118819, sign: true }); + data.append(FP16x16 { mag: 87037, sign: false }); + data.append(FP16x16 { mag: 45884, sign: false }); + data.append(FP16x16 { mag: 108580, sign: false }); + data.append(FP16x16 { mag: 175625, sign: true }); + data.append(FP16x16 { mag: 42256, sign: false }); + data.append(FP16x16 { mag: 39523, sign: false }); + data.append(FP16x16 { mag: 29431, sign: false }); + data.append(FP16x16 { mag: 17315, sign: true }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/maxpool_4d_dilations/output_0.cairo b/tests/nodes/maxpool_4d_dilations/output_0.cairo new file mode 100644 index 000000000..9c0185e89 --- /dev/null +++ b/tests/nodes/maxpool_4d_dilations/output_0.cairo @@ -0,0 +1,65 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(3); + shape.append(2); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 49149, sign: false }); + data.append(FP16x16 { mag: 93230, sign: false }); + data.append(FP16x16 { mag: 131524, sign: false }); + 
data.append(FP16x16 { mag: 172442, sign: false }); + data.append(FP16x16 { mag: 171591, sign: false }); + data.append(FP16x16 { mag: 124609, sign: false }); + data.append(FP16x16 { mag: 107561, sign: false }); + data.append(FP16x16 { mag: 193719, sign: false }); + data.append(FP16x16 { mag: 97602, sign: false }); + data.append(FP16x16 { mag: 58097, sign: false }); + data.append(FP16x16 { mag: 117822, sign: false }); + data.append(FP16x16 { mag: 79390, sign: false }); + data.append(FP16x16 { mag: 93738, sign: false }); + data.append(FP16x16 { mag: 114134, sign: false }); + data.append(FP16x16 { mag: 84330, sign: false }); + data.append(FP16x16 { mag: 81192, sign: false }); + data.append(FP16x16 { mag: 93421, sign: false }); + data.append(FP16x16 { mag: 80140, sign: false }); + data.append(FP16x16 { mag: 88854, sign: false }); + data.append(FP16x16 { mag: 93610, sign: false }); + data.append(FP16x16 { mag: 57375, sign: false }); + data.append(FP16x16 { mag: 98163, sign: false }); + data.append(FP16x16 { mag: 116267, sign: false }); + data.append(FP16x16 { mag: 125650, sign: false }); + data.append(FP16x16 { mag: 136686, sign: false }); + data.append(FP16x16 { mag: 123236, sign: false }); + data.append(FP16x16 { mag: 174668, sign: false }); + data.append(FP16x16 { mag: 65025, sign: false }); + data.append(FP16x16 { mag: 68769, sign: false }); + data.append(FP16x16 { mag: 131564, sign: false }); + data.append(FP16x16 { mag: 97200, sign: false }); + data.append(FP16x16 { mag: 90465, sign: false }); + data.append(FP16x16 { mag: 105370, sign: false }); + data.append(FP16x16 { mag: 103120, sign: false }); + data.append(FP16x16 { mag: 113567, sign: false }); + data.append(FP16x16 { mag: 157288, sign: false }); + data.append(FP16x16 { mag: 118773, sign: false }); + data.append(FP16x16 { mag: 180033, sign: false }); + data.append(FP16x16 { mag: 90227, sign: false }); + data.append(FP16x16 { mag: 198738, sign: false }); + data.append(FP16x16 { mag: 101984, sign: false }); + 
data.append(FP16x16 { mag: 61944, sign: false }); + data.append(FP16x16 { mag: 62972, sign: false }); + data.append(FP16x16 { mag: 67113, sign: false }); + data.append(FP16x16 { mag: 127750, sign: false }); + data.append(FP16x16 { mag: 102959, sign: false }); + data.append(FP16x16 { mag: 94838, sign: false }); + data.append(FP16x16 { mag: 125215, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} From eeceae920c1b3cf382165307daebe01489ab4063 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Sat, 24 Feb 2024 10:52:10 +0100 Subject: [PATCH 03/68] summary + compatibility --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index d867a96ba..67fb0fbd7 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -176,6 +176,7 @@ * [nn.conv](framework/operators/neural-network/nn.conv.md) * [nn.depth_to_space](framework/operators/neural-network/nn.depth_to_space.md) * [nn.space_to_depth](framework/operators/neural-network/nn.space_to_depth.md) + * [nn.max\_pool](framework/operators/neural-network/nn.max\_pool.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index a05d3bbce..c4ccc859b 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -47,6 +47,7 @@ You can see below the list of current supported ONNX Operators: | [Col2im](operators/neural-network/nn.col2im\_sigmoid.md) | :white\_check\_mark: | | [ConvTranspose](operators/neural-network/nn.conv\_transpose_.md) | :white\_check\_mark: | | [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | +| [MaxPool](operators/neural-network/nn.max\_pool.md) 
| :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | From 21fb7915474d9ce2cc005aded6834ab2a350f636 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Tue, 27 Feb 2024 06:05:46 +0100 Subject: [PATCH 04/68] fix: change type of I from Option> to Option> --- .../operators/neural-network/nn.max_pool.md | 6 +- src/operators/nn/core.cairo | 8 +-- src/operators/nn/functional/common_pool.cairo | 5 +- src/operators/nn/functional/max_pool.cairo | 61 ++++++++++--------- .../nn/implementations/nn_fp16x16.cairo | 4 +- .../nn/implementations/nn_fp32x32.cairo | 17 ++++-- .../nn/implementations/nn_fp64x64.cairo | 17 ++++-- .../nn/implementations/nn_fp8x23.cairo | 4 +- src/operators/nn/implementations/nn_i32.cairo | 2 +- src/operators/nn/implementations/nn_i8.cairo | 3 +- src/operators/nn/implementations/nn_u32.cairo | 3 +- 11 files changed, 69 insertions(+), 61 deletions(-) diff --git a/docs/framework/operators/neural-network/nn.max_pool.md b/docs/framework/operators/neural-network/nn.max_pool.md index eb21f4c8c..e3ad8d84e 100644 --- a/docs/framework/operators/neural-network/nn.max_pool.md +++ b/docs/framework/operators/neural-network/nn.max_pool.md @@ -12,7 +12,7 @@ storage_order: Option, strides: Option>, output_len: usize, -) -> (Tensor, Option>); +) -> (Tensor, Option>); ``` MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. max pooling consisting of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized. 
@@ -32,7 +32,7 @@ MaxPool consumes an input tensor X and applies max pooling across the tensor acc ## Returns A `Tensor` that contains the result of the max pool. -A `Option>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. +A `Option>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. ## Examples ```rust @@ -43,7 +43,7 @@ use orion::numbers::FP16x16; use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; -fn example_max_pool() -> (Tensor, Option>) { +fn example_max_pool() -> (Tensor, Option>) { let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(1); diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 032880942..d74b43f6d 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -1319,7 +1319,7 @@ trait NNTrait { /// storage_order: Option, /// strides: Option>, /// output_len: usize, - /// ) -> (Tensor, Option>); + /// ) -> (Tensor, Option>); /// ``` /// /// MaxPool consumes an input tensor X and applies max pooling across the tensor according to kernel sizes, stride sizes, and pad lengths. max pooling consisting of computing the max on all values of a subset of the input tensor according to the kernel size and downsampling the data into the output tensor Y for further processing. The output spatial shape is calculated differently depending on whether explicit padding is used, where pads is employed, or auto padding is used, where auto_pad is utilized. @@ -1339,7 +1339,7 @@ trait NNTrait { /// ## Returns /// /// A `Tensor` that contains the result of the max pool. - /// A `Option>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. + /// A `Option>` with the indices tensor from max pooling across the input tensor. The dimensions of indices are the same as output tensor. 
/// ## Examples /// /// ```rust @@ -1350,7 +1350,7 @@ trait NNTrait { /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor}; /// /// - /// fn example_max_pool() -> (Tensor, Option>) { + /// fn example_max_pool() -> (Tensor, Option>) { /// let mut shape = ArrayTrait::::new(); /// shape.append(1); /// shape.append(1); @@ -1424,5 +1424,5 @@ trait NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>); + ) -> (Tensor, Option>); } diff --git a/src/operators/nn/functional/common_pool.cairo b/src/operators/nn/functional/common_pool.cairo index 02d8826ce..70bde406a 100644 --- a/src/operators/nn/functional/common_pool.cairo +++ b/src/operators/nn/functional/common_pool.cairo @@ -11,7 +11,6 @@ use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; use orion::numbers::FP16x16; use orion::operators::nn::{AUTO_PAD, POOLING_TYPE}; - fn common_pool< T, MAG, @@ -29,7 +28,6 @@ fn common_pool< +PartialEq, +TryInto, +Into, - +Into, +Rem, +Neg, +SubEq, @@ -44,7 +42,7 @@ fn common_pool< pads: Option>, strides: Option>, p: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { let padding_value: T = match pooling_type { POOLING_TYPE::AVG => { let padding_value = if count_include_pad == 0 { @@ -188,7 +186,6 @@ fn pool< +PartialEq, +TryInto, +Into, - +Into, +Rem, +Neg, +SubEq, diff --git a/src/operators/nn/functional/max_pool.cairo b/src/operators/nn/functional/max_pool.cairo index 69e060a2b..c6fc8b5c8 100644 --- a/src/operators/nn/functional/max_pool.cairo +++ b/src/operators/nn/functional/max_pool.cairo @@ -30,7 +30,6 @@ fn max_pool< +PartialEq, +TryInto, +Into, - +Into, +Rem, +Neg, +SubEq, @@ -44,7 +43,7 @@ fn max_pool< storage_order: Option, strides: Option>, output_len: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { match dilations { Option::Some(dilations) => { if (min(dilations) != max(dilations) || min(dilations) != 1) { @@ -173,7 +172,6 @@ fn max_pool_implementation< +PartialEq, +TryInto, +Into, - 
+Into, +Rem, +Neg, +SubEq, @@ -187,7 +185,7 @@ fn max_pool_implementation< storage_order: Option, strides: Option>, output_len: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); let n_dims = kernel_shape.len(); @@ -469,7 +467,7 @@ fn max_pool_1d, +NumberTrait, +Copy, +Drop strides: Span, output_spatial_shape: Span, output_len: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { let mut y_dims = ArrayTrait::new(); y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); y_dims.append_span(output_spatial_shape); @@ -523,14 +521,14 @@ fn max_pool_1d, +NumberTrait, +Copy, +Drop }; Y_data.append(Yh); - I_data.append((c * x_step).into() + h_index); + I_data.append((c * x_step) + h_index.into()); ph += 1; }; c += 1; }; if output_len == 1 { - return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); } return ( TensorTrait::new(y_dims.span(), Y_data.span()), @@ -559,7 +557,7 @@ fn max_pool_2d< strides: Span, output_spatial_shape: Span, output_len: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { let mut y_dims = ArrayTrait::new(); y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); y_dims.append_span(output_spatial_shape); @@ -651,9 +649,9 @@ fn max_pool_2d< if Yh != NumberTrait::::min_value() { Y_data.append(Yh); if storage_order == 0 { - I_data.append((c * x_step).into() + h_index * W.into() + w_index); + I_data.append((c * x_step) + h_index.into() * W + w_index.into()); } else { - I_data.append((c * x_step).into() + h_index + w_index * H.into()); + I_data.append((c * x_step) + h_index.into() + w_index.into() * H); } } pw += 1; @@ -664,7 +662,7 @@ fn max_pool_2d< }; if output_len == 1 { - return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); } return ( TensorTrait::new(y_dims.span(), Y_data.span()), 
@@ -693,7 +691,7 @@ fn max_pool_3d< strides: Span, output_spatial_shape: Span, output_len: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { let mut y_dims = ArrayTrait::new(); y_dims.append_span(SpanTrait::slice((*X).shape, 0, 2)); y_dims.append_span(output_spatial_shape); @@ -813,18 +811,18 @@ fn max_pool_3d< if storage_order == 0 { I_data .append( - (c * x_step).into() - + h_index * W.into() * D.into() - + w_index * D.into() - + d_index + (c * x_step) + + h_index.into() * W * D + + w_index.into() * D + + d_index.into() ); } else { I_data .append( - (c * x_step).into() - + h_index - + w_index * H.into() - + d_index * H.into() * W.into() + (c * x_step) + + h_index.into() + + w_index.into() * H + + d_index.into() * H * W ); } pd += 1; @@ -837,7 +835,7 @@ fn max_pool_3d< }; if output_len == 1 { - return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); } return ( TensorTrait::new(y_dims.span(), Y_data.span()), @@ -857,7 +855,7 @@ fn max_pool_nd< +PartialEq, +PrintTrait, +TryInto, - +Into, + +Into, +Div >( X: @Tensor, @@ -870,7 +868,7 @@ fn max_pool_nd< strides: Span, output_spatial_shape: Span, output_len: usize, -) -> (Tensor, Option>) { +) -> (Tensor, Option>) { let nd = (*X).shape.len() - 2; let mut y_dims = ArrayTrait::new(); @@ -938,8 +936,10 @@ fn max_pool_nd< nstart.append(ns); nend.append(ns + *ks_n.at(n) * *dilation_n.at(n)); - let a: T = NumberTrait::new_unscaled(((*nend.at(n) - ns)).into(), false); - let b: T = NumberTrait::new_unscaled((*dilation_n.at(n)).into(), false); + let a: T = NumberTrait::new_unscaled( + (*kernel_shape.at(n) * *dilations.at(n)).into(), false + ); + let b: T = NumberTrait::new_unscaled((*dilations.at(n)).into(), false); nstep.append(NumberTrait::ceil(a / b).try_into().unwrap()); n += 1; }; @@ -1004,7 +1004,7 @@ fn max_pool_nd< index += *n_index.at(n) * (*x_stride.at(2 + n)).into(); n += 1; }; - I_data.append((c * x_step).into() + 
index); + I_data.append((c * x_step) + index.into()); } else { let mut index = 0; let mut n = nd; @@ -1015,16 +1015,19 @@ fn max_pool_nd< index += *n_index.at(n - 1) * (*i_stride_storage_order_1.at(nd - n)).into(); n -= 1; }; - I_data.append((c * x_step).into() + index); + I_data.append((c * x_step) + index.into()); } p += 1; }; c += 1; }; if output_len == 1 { - return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); } - return (TensorTrait::new(y_dims.span(), Y_data.span()), Option::>::None); + return ( + TensorTrait::new(y_dims.span(), Y_data.span()), + Option::Some(TensorTrait::new(y_dims.span(), I_data.span())) + ); } diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index aa6a7b6ce..24d631a21 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -13,8 +13,6 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ use orion::operators::tensor::implementations::tensor_fp16x16wide::{ FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd }; -use orion::numbers::I32IntoU32; -use orion::operators::tensor::implementations::tensor_i32::I32Tensor; use orion::operators::nn::AUTO_PAD; impl FP16x16NN of NNTrait { @@ -159,7 +157,7 @@ impl FP16x16NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { + ) -> (Tensor, Option>) { functional::max_pool::max_pool( X, auto_pad, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 3054dd1ad..c14e9f544 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -7,8 +7,6 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x use orion::operators::tensor::implementations::tensor_fp32x32::{ 
FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd }; -use orion::numbers::I32IntoU32; -use orion::operators::tensor::implementations::tensor_i32::I32Tensor; use orion::operators::nn::AUTO_PAD; impl FP32x32NN of NNTrait { @@ -153,8 +151,17 @@ impl FP32x32NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { - //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len) - panic(array!['not supported!']) + ) -> (Tensor, Option>) { + functional::max_pool::max_pool( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_len + ) } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index a378cfb70..c40f6f636 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -7,8 +7,6 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x use orion::operators::tensor::implementations::tensor_fp64x64::{ FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd }; -//use orion::numbers::I32IntoU64; -use orion::operators::tensor::implementations::tensor_i32::I32Tensor; use orion::operators::nn::AUTO_PAD; impl FP64x64NN of NNTrait { @@ -153,8 +151,17 @@ impl FP64x64NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { - //functional::max_pool::max_pool(X, auto_pad, ceil_mode, dilations,kernel_shape, pads, storage_order, strides, output_len) - panic(array!['not supported!']) + ) -> (Tensor, Option>) { + functional::max_pool::max_pool( + X, + auto_pad, + ceil_mode, + dilations, + kernel_shape, + pads, + storage_order, + strides, + output_len + ) } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index add955fd9..73cea38fa 100644 --- 
a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -11,8 +11,6 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W }; use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor}; -use orion::numbers::I32IntoU32; -use orion::operators::tensor::implementations::tensor_i32::I32Tensor; use orion::operators::nn::AUTO_PAD; impl FP8x23NN of NNTrait { @@ -155,7 +153,7 @@ impl FP8x23NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { + ) -> (Tensor, Option>) { functional::max_pool::max_pool( X, auto_pad, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 0156fc5f5..0532b74eb 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -144,7 +144,7 @@ impl I32NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { + ) -> (Tensor, Option>) { panic(array!['not supported!']) } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index 284dd5ee1..dc56224bc 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -4,7 +4,6 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::implementations::tensor_i32::I32Tensor; use orion::operators::nn::AUTO_PAD; impl I8NN of NNTrait { @@ -145,7 +144,7 @@ impl I8NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { + ) -> (Tensor, Option>) { panic(array!['not supported!']) } } diff --git a/src/operators/nn/implementations/nn_u32.cairo 
b/src/operators/nn/implementations/nn_u32.cairo index 1ebfb3bec..f061b90ae 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -4,7 +4,6 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::implementations::tensor_i32::I32Tensor; use orion::operators::nn::AUTO_PAD; impl U32NN of NNTrait { @@ -145,7 +144,7 @@ impl U32NN of NNTrait { storage_order: Option, strides: Option>, output_len: usize, - ) -> (Tensor, Option>) { + ) -> (Tensor, Option>) { panic(array!['not supported!']) } } From 3bacd33606f5f829138364fd9e7ffb26a1b25da6 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Thu, 29 Feb 2024 11:12:07 +0100 Subject: [PATCH 05/68] fix test --- nodegen/node/max_pool.py | 4 +- tests/nodes.cairo | 1045 ----------------- tests/nodes/maxpool_2d_constraint_index.cairo | 46 +- .../output_0.cairo | 4 +- 4 files changed, 27 insertions(+), 1072 deletions(-) diff --git a/nodegen/node/max_pool.py b/nodegen/node/max_pool.py index 9786209a2..a2b240746 100644 --- a/nodegen/node/max_pool.py +++ b/nodegen/node/max_pool.py @@ -981,7 +981,7 @@ def export_maxpool_2d_constraint_index() -> None: x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "maxpool_2d_constraint_index" @@ -994,7 +994,7 @@ def export_maxpool_2d_constraint_index() -> None: func_sig += "Option::None," func_sig += "Option::Some(1)," func_sig += "Option::Some(array![2, 2].span())," - func_sig += "1)" + func_sig += "2)" make_test( [x], z, func_sig, name, Trait.NN) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 2133d4f46..4ce28511d 100644 --- a/tests/nodes.cairo +++ 
b/tests/nodes.cairo @@ -1,1048 +1,3 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod 
argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; 
-mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod 
less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod reduce_sum_fp16x16_1D; -mod reduce_sum_fp16x16_2D_default; -mod reduce_sum_fp16x16_2D_keepdims; -mod reduce_sum_fp16x16_2D_axis_1; -mod reduce_sum_fp8x23_1D; -mod reduce_sum_fp8x23_2D_default; -mod reduce_sum_fp8x23_2D_keepdims; -mod reduce_sum_fp8x23_2D_axis_1; -mod reduce_sum_i32_1D; -mod reduce_sum_i32_2D_default; -mod reduce_sum_i32_2D_keepdims; 
-mod reduce_sum_i32_2D_axis_1; -mod reduce_sum_i8_1D; -mod reduce_sum_i8_2D_default; -mod reduce_sum_i8_2D_keepdims; -mod reduce_sum_i8_2D_axis_1; -mod reduce_sum_u32_1D; -mod reduce_sum_u32_2D_default; -mod reduce_sum_u32_2D_keepdims; -mod reduce_sum_u32_2D_axis_1; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; 
-mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod 
min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod 
array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod 
triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; 
-mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod 
reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod 
sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod 
gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod 
compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; 
-mod col2im_strides; -mod random_uniform_like_fp16x16; -mod random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod 
depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; -mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; -mod maxpool_2d; -mod maxpool_1d; -mod maxpool_1d_default; -mod maxpool_2d_ceil; mod maxpool_2d_constraint_index; mod maxpool_2d_default; mod maxpool_2d_dilations; diff --git a/tests/nodes/maxpool_2d_constraint_index.cairo b/tests/nodes/maxpool_2d_constraint_index.cairo index 72f1294ad..8698070c2 100644 --- a/tests/nodes/maxpool_2d_constraint_index.cairo +++ b/tests/nodes/maxpool_2d_constraint_index.cairo @@ -1,25 +1,25 @@ -//mod input_0; -//mod output_0; -// -// -//use orion::operators::nn::NNTrait; -//use orion::operators::tensor::U32TensorPartialEq; -//use orion::numbers::FixedTrait; -//use orion::operators::tensor::I32TensorPartialEq; -//use orion::utils::{assert_eq, assert_seq_eq}; -//use orion::operators::nn::FP16x16NN; -//use orion::operators::nn::U32NN; -// -// -//#[test] -//#[available_gas(2000000000)] -//fn test_maxpool_2d_constraint_index() { -// let input_0 = input_0::input_0(); -// let z_0 = output_0::output_0(); -// -// let (_, y_0) 
= NNTrait::max_pool(@input_0,Option::None,Option::None,Option::None,array![2, 2].span(),Option::None,Option::Some(1),Option::Some(array![2, 2].span()),1); -// -// assert_eq(y_0.unwrap(), z_0); -//} +mod input_0; +mod output_0; + + +use orion::operators::nn::NNTrait; +use orion::operators::tensor::U32TensorPartialEq; +use orion::numbers::FixedTrait; +use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; +use orion::operators::nn::U32NN; + + +#[test] +#[available_gas(2000000000)] +fn test_maxpool_2d_constraint_index() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let (_, y_0) = NNTrait::max_pool(@input_0,Option::None,Option::None,Option::None,array![2, 2].span(),Option::None,Option::Some(1),Option::Some(array![2, 2].span()),2); + + assert_eq(y_0.unwrap(), z_0); +} diff --git a/tests/nodes/maxpool_2d_constraint_index/output_0.cairo b/tests/nodes/maxpool_2d_constraint_index/output_0.cairo index 872d5499f..cf6e63b1b 100644 --- a/tests/nodes/maxpool_2d_constraint_index/output_0.cairo +++ b/tests/nodes/maxpool_2d_constraint_index/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(1); From 3f28532a02af4390397cdaf3648ea056329f8b3b Mon Sep 17 00:00:00 2001 From: chachaleo Date: Thu, 29 Feb 2024 11:25:47 +0100 Subject: [PATCH 06/68] fix test --- tests/nodes.cairo | 1041 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1041 insertions(+) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 4ce28511d..fede65c03 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,3 +1,1044 @@ +mod abs_fp16x16; +mod 
abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmax_fp16x16_1D_default; +mod argmax_fp16x16_1D_keepdims_false; +mod argmax_fp16x16_1D_last_index; +mod argmax_fp16x16_2D_default; +mod argmax_fp16x16_2D_keepdims_false; +mod argmax_fp16x16_2D_last_index; +mod argmax_fp16x16_3D_default; +mod argmax_fp16x16_3D_keepdims_false; +mod argmax_fp16x16_3D_last_index; +mod argmax_fp8x23_1D_default; +mod argmax_fp8x23_1D_keepdims_false; +mod argmax_fp8x23_1D_last_index; +mod argmax_fp8x23_2D_default; +mod argmax_fp8x23_2D_keepdims_false; +mod argmax_fp8x23_2D_last_index; +mod argmax_fp8x23_3D_default; +mod argmax_fp8x23_3D_keepdims_false; +mod argmax_fp8x23_3D_last_index; +mod argmax_i32_1D_default; +mod argmax_i32_1D_keepdims_false; +mod argmax_i32_1D_last_index; +mod argmax_i32_2D_default; +mod argmax_i32_2D_keepdims_false; +mod argmax_i32_2D_last_index; +mod argmax_i32_3D_default; +mod argmax_i32_3D_keepdims_false; +mod argmax_i32_3D_last_index; +mod argmax_i8_1D_default; +mod argmax_i8_1D_keepdims_false; +mod argmax_i8_1D_last_index; +mod argmax_i8_2D_default; +mod argmax_i8_2D_keepdims_false; +mod argmax_i8_2D_last_index; +mod argmax_i8_3D_default; +mod argmax_i8_3D_keepdims_false; +mod argmax_i8_3D_last_index; +mod argmax_u32_1D_default; +mod argmax_u32_1D_keepdims_false; +mod argmax_u32_1D_last_index; +mod argmax_u32_2D_default; +mod argmax_u32_2D_keepdims_false; +mod argmax_u32_2D_last_index; +mod argmax_u32_3D_default; +mod argmax_u32_3D_keepdims_false; +mod argmax_u32_3D_last_index; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod 
argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; +mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod 
concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod 
less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod reduce_sum_fp16x16_1D; +mod reduce_sum_fp16x16_2D_default; +mod reduce_sum_fp16x16_2D_keepdims; +mod reduce_sum_fp16x16_2D_axis_1; +mod reduce_sum_fp8x23_1D; +mod reduce_sum_fp8x23_2D_default; +mod reduce_sum_fp8x23_2D_keepdims; +mod reduce_sum_fp8x23_2D_axis_1; +mod reduce_sum_i32_1D; +mod reduce_sum_i32_2D_default; +mod reduce_sum_i32_2D_keepdims; +mod reduce_sum_i32_2D_axis_1; +mod reduce_sum_i8_1D; +mod 
reduce_sum_i8_2D_default; +mod reduce_sum_i8_2D_keepdims; +mod reduce_sum_i8_2D_axis_1; +mod reduce_sum_u32_1D; +mod reduce_sum_u32_2D_default; +mod reduce_sum_u32_2D_keepdims; +mod reduce_sum_u32_2D_axis_1; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softmax_fp16x16; +mod softmax_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; +mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod gather_fp8x23_3d_default; +mod 
gather_fp8x23_3d_axis1; +mod gather_fp8x23_3d_axis2; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; +mod gather_i8_3d_default; +mod gather_i8_3d_axis1; +mod gather_i8_3d_axis2; +mod gather_i32_3d_default; +mod gather_i32_3d_axis1; +mod gather_i32_3d_axis2; +mod gather_u32_3d_default; +mod gather_u32_3d_axis1; +mod gather_u32_3d_axis2; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; +mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; 
+mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; 
+mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; +mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod 
triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod 
reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod gather_elements_fp16x16_3d_default; +mod gather_elements_fp16x16_3d_axis1; +mod gather_elements_fp16x16_3d_axis2; +mod gather_elements_fp8x23_3d_default; +mod gather_elements_fp8x23_3d_axis1; +mod gather_elements_fp8x23_3d_axis2; +mod gather_elements_i8_3d_default; +mod gather_elements_i8_3d_axis1; +mod gather_elements_i32_3d_default; +mod gather_elements_i32_3d_axis1; +mod gather_elements_i32_3d_axis2; +mod gather_elements_u32_default; +mod gather_elements_u32_axis1; +mod gather_elements_u32_axis2; +mod gather_elements_u32_axis3; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod 
reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod 
sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; 
+mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod 
compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod 
random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod 
depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; mod maxpool_2d_constraint_index; mod maxpool_2d_default; mod maxpool_2d_dilations; From 5b0cdc09b5059170130801bb825fd15d11096465 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Mon, 4 Mar 2024 12:25:31 +0100 Subject: [PATCH 07/68] fix: while loops --- src/operators/nn/functional/max_pool.cairo | 151 +++++---------------- 1 file changed, 31 insertions(+), 120 deletions(-) diff --git a/src/operators/nn/functional/max_pool.cairo b/src/operators/nn/functional/max_pool.cairo index c6fc8b5c8..624cc2775 100644 --- a/src/operators/nn/functional/max_pool.cairo +++ b/src/operators/nn/functional/max_pool.cairo @@ -194,10 +194,7 @@ fn max_pool_implementation< Option::None => { let mut pads = ArrayTrait::new(); let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { pads.append(0); pads.append(0); i += 1; @@ -210,10 +207,7 @@ fn max_pool_implementation< Option::None => { let mut dilations = ArrayTrait::new(); let mut i = 0; - loop { - if i == n_dims { - break; - } + 
while i != n_dims { dilations.append(1); i += 1; }; @@ -225,10 +219,7 @@ fn max_pool_implementation< Option::None => { let mut strides = ArrayTrait::new(); let mut i = 0; - loop { - if i == n_dims { - break; - } + while i != n_dims { strides.append(1); i += 1; }; @@ -256,10 +247,7 @@ fn max_pool_implementation< let output_spatial_shape = if ceil_mode == 1 { let mut output_spatial_shape = ArrayTrait::::new(); let mut i = 0; - loop { - if i == input_spatial_shape.len() { - break; - } + while i != input_spatial_shape.len() { let oss: T = NumberTrait::ceil( (NumberTrait::new_unscaled( (*input_spatial_shape.at(i) + *pads.at(i) + *pads.at(i + n_dims)).into(), false @@ -286,10 +274,7 @@ fn max_pool_implementation< } else { let mut output_spatial_shape = ArrayTrait::::new(); let mut i = 0; - loop { - if i == input_spatial_shape.len() { - break; - } + while i != input_spatial_shape.len() { let oss: T = NumberTrait::floor( (NumberTrait::new_unscaled( (*input_spatial_shape.at(i) + *pads.at(i) + *pads.at(i + n_dims)).into(), false @@ -315,10 +300,7 @@ fn max_pool_implementation< let mut pads = ArrayTrait::new(); let mut i = 0; - loop { - if i == input_spatial_shape.len() { - break; - } + while i != input_spatial_shape.len() { let oss: T = NumberTrait::ceil( NumberTrait::new_unscaled((*input_spatial_shape.at(i)).into(), false) / NumberTrait::new_unscaled((*strides.at(i)).into(), false) @@ -347,10 +329,7 @@ fn max_pool_implementation< let mut pads = ArrayTrait::new(); let mut i = 0; - loop { - if i == input_spatial_shape.len() { - break; - } + while i != input_spatial_shape.len() { let oss: T = NumberTrait::floor( NumberTrait::new_unscaled((*input_spatial_shape.at(i)).into(), false) @@ -376,10 +355,7 @@ fn max_pool_implementation< AUTO_PAD::VALID => { let mut output_spatial_shape = ArrayTrait::::new(); let mut i = 0; - loop { - if i == input_spatial_shape.len() { - break; - } + while i != input_spatial_shape.len() { let oss: T = NumberTrait::ceil( 
(NumberTrait::new_unscaled((*input_spatial_shape.at(i)).into(), false) - NumberTrait::new_unscaled( @@ -489,17 +465,11 @@ fn max_pool_1d, +NumberTrait, +Copy, +Drop let mut I_data = ArrayTrait::new(); let mut c = 0; - loop { - if c == total_channels { - break; - } + while c != total_channels { let x_d = c * x_step; let mut ph = 0; - loop { - if ph == y_step { - break; - } + while ph != y_step { let hstart = I32Number::new((ph).into(), false) * stride_h - pad_h; let hend = hstart + ks_h * dilation_h; @@ -507,10 +477,7 @@ fn max_pool_1d, +NumberTrait, +Copy, +Drop let mut Yh: T = NumberTrait::min_value(); let mut h = hstart; - loop { - if h >= hend { - break; - } + while h != hend { if h >= 0 && h < x_step.into() { if *(*X).data.at(x_d + h.into()) > Yh { h_index = h.into(); @@ -592,25 +559,16 @@ fn max_pool_2d< let X_len = (*X).data.len(); let mut c = 0; - loop { - if c == total_channels { - break; - } + while c != total_channels { let x_d = c * x_step; let mut ph = 0; - loop { - if ph == pooled_H { - break; - } + while ph != pooled_H { let hstart = I32Number::new((ph).into(), false) * stride_h - pad_h; let hend = hstart + ks_h * dilation_h; let mut pw = 0; - loop { - if pw == pooled_W { - break; - } + while pw != pooled_W { let wstart = I32Number::new((pw).into(), false) * stride_w - pad_w; let wend = wstart + ks_w * dilation_w; @@ -620,16 +578,10 @@ fn max_pool_2d< let mut Yh: T = NumberTrait::min_value(); let mut h = hstart; - loop { - if h >= hend { - break; - } + while h != hend { if h >= 0 && h < H.into() { let mut w = wstart; - loop { - if w >= wend { - break; - } + while w != wend { if w >= 0 && w < W.into() { let input_index = h * W.into() + w; if input_index >= 0 && input_index < X_len.into() { @@ -732,33 +684,22 @@ fn max_pool_3d< let X_len = (*X).data.len(); let mut c = 0; - loop { - if c == total_channels { - break; - } + + while c != total_channels { let x_d = c * x_step; let mut ph = 0; - loop { - if ph == pooled_H { - break; - } + while ph != pooled_H 
{ let hstart = I32Number::new((ph).into(), false) * stride_h - pad_h; let hend = hstart + ks_h * dilation_h; let mut pw = 0; - loop { - if pw == pooled_W { - break; - } + while pw != pooled_W { let wstart = I32Number::new((pw).into(), false) * stride_w - pad_w; let wend = wstart + ks_w * dilation_w; let mut pd = 0; - loop { - if pd == pooled_D { - break; - } + while pd != pooled_D { let dstart = I32Number::new((pd).into(), false) * stride_d - pad_d; let dend = dstart + ks_d * dilation_d; @@ -769,22 +710,13 @@ fn max_pool_3d< let mut Yh: T = NumberTrait::min_value(); let mut h = hstart; - let mut Yh = loop { - if h >= hend { - break Yh; - } + while h != hend { if h >= 0 && h < H.into() { let mut w = wstart; - loop { - if w >= wend { - break Yh; - } + while w != wend { if w >= 0 && w < W.into() { let mut d = dstart; - loop { - if d >= dend { - break; - } + while d != dend { if d >= 0 && d < D.into() { let input_index = h * W.into() * D.into() + w * D.into() @@ -904,17 +836,11 @@ fn max_pool_nd< let X_len = (*X).data.len(); let mut c = 0; - loop { - if c == total_channels { - break; - } + while c != total_channels { let x_d = c * x_step; let mut p = 0; - loop { - if p == y_step { - break; - } + while p != y_step { let mut flatten_index = p; @@ -923,10 +849,7 @@ fn max_pool_nd< let mut nstep = ArrayTrait::::new(); let mut n = 0; - loop { - if n == nd { - break; - } + while n != nd { let (pn, rem) = DivRem::div_rem( flatten_index, (*y_stride.at(2 + n)).try_into().unwrap() ); @@ -953,20 +876,14 @@ fn max_pool_nd< let mut Yh: T = NumberTrait::min_value(); let mut i = 0; - let Yh = loop { - if i == max_iter { - break Yh; - } + while i != max_iter { let mut flatten_index = i; let mut is_outside = false; let mut i_index = ArrayTrait::new(); let mut input_index = I32Number::zero(); let mut n = 0; - loop { - if n == nd { - break Yh; - } + while n != nd { let (item, rem) = DivRem::div_rem( flatten_index, (*nstride.at(n)).try_into().unwrap() ); @@ -997,10 +914,7 @@ fn 
max_pool_nd< if storage_order == 0 { let mut index = 0; let mut n = 0; - loop { - if n == nd { - break; - } + while n != nd { index += *n_index.at(n) * (*x_stride.at(2 + n)).into(); n += 1; }; @@ -1008,10 +922,7 @@ fn max_pool_nd< } else { let mut index = 0; let mut n = nd; - loop { - if n == 0 { - break; - } + while n != 0 { index += *n_index.at(n - 1) * (*i_stride_storage_order_1.at(nd - n)).into(); n -= 1; }; From 439976d10091aaa74a57f463aeda8191de8b3ea1 Mon Sep 17 00:00:00 2001 From: zhangzhichao Date: Tue, 5 Mar 2024 17:43:31 +0800 Subject: [PATCH 08/68] style: Update to a while loop --- .../tensor/manipulation/center_crop_pad.cairo | 64 ++++++------------- 1 file changed, 20 insertions(+), 44 deletions(-) diff --git a/src/operators/tensor/manipulation/center_crop_pad.cairo b/src/operators/tensor/manipulation/center_crop_pad.cairo index a47818f45..21c9f5531 100644 --- a/src/operators/tensor/manipulation/center_crop_pad.cairo +++ b/src/operators/tensor/manipulation/center_crop_pad.cairo @@ -36,10 +36,8 @@ fn center_crop_pad< Option::None => { let mut axes: Array = ArrayTrait::new(); let mut i: usize = 0; - loop { - if i > input_rank - 1 { - break (); - } + + while i < input_rank{ axes.append(i); i += 1; }; @@ -55,10 +53,8 @@ fn center_crop_pad< Option::Some(dim) => { let mut temp: Array = ArrayTrait::new(); let mut i: usize = 0; - loop { - if i > *dim - 1 { - break (); - } + + while i < *dim{ temp.append(i); i += 1; }; @@ -193,10 +189,8 @@ fn tensor_crop< let mut count = 1; let mut shape: Array = ArrayTrait::new(); let mut i: usize = 0; - loop { - if i > input_data_shape_copy.len() - 1 { - break (); - } + + while i < input_data_shape_copy.len(){ shape.append(*input_data_shape_copy.at(i)); i += 1; }; @@ -213,13 +207,11 @@ fn tensor_crop< let mut arr_list: Array> = make_array_from_dim(res, count); res = ArrayTrait::::new().span(); let mut j: usize = 0; - loop { - if j > slice_len - 1 { - break (); - } + + while j < slice_len{ res = 
res.concat(arr_list.at(*slice.at(j)).span()); j += 1; - } + }; } break (); } @@ -233,10 +225,8 @@ fn tensor_crop< Option::Some(mut arr) => { let mut arr = make_array_from_dim(arr.span(), count); let mut j: usize = 0; - loop { - if j > slice_len - 1 { - break (); - } + + while j < slice_len{ res = res.concat(arr.at(*slice.at(j)).span()); j += 1; }; @@ -257,14 +247,9 @@ fn make_zero_array< +Copy >(size: usize, zero: T) -> Span { let mut res: Array = ArrayTrait::new(); - if size == 0 { - return res.span(); - } let mut i: usize = 0; - loop { - if i > size - 1 { - break (); - } + + while i < size{ res.append(zero.clone()); i += 1; }; @@ -274,10 +259,8 @@ fn make_zero_array< fn slice(start: usize, end: usize) -> Array { let mut index: Array = ArrayTrait::new(); let mut i: usize = start; - loop { - if i > end - 1 { - break; - } + + while i < end{ index.append(i); i += 1; }; @@ -293,10 +276,8 @@ fn array_cover(ref arr: Array>, index: usize, data: Array) { let mut arr_len: usize = arr.len(); let mut i: usize = 0; - loop { - if i > arr_len - 1 { - break (); - } + + while i < arr_len{ let temp = arr.pop_front().unwrap(); if i == index { arr.append(data.clone()); @@ -316,11 +297,8 @@ fn usize_cover(ref arr: Array, index: usize, data: usize) { let mut arr_len: usize = arr.len(); let mut i: usize = 0; - loop { - if i > arr_len - 1 { - break (); - } + while i < arr_len{ let temp = arr.pop_front().unwrap(); if i == index { arr.append(data.clone()); @@ -337,10 +315,8 @@ fn make_array_from_dim, +Copy>(input_data: Span, dim: usize) - let mut res = ArrayTrait::>::new(); let mut i: usize = 0; - loop { - if i > row - 1 { - break (); - } + + while i < row{ let mut temp: Array = ArrayTrait::new(); let mut j: usize = 0; loop { From a4fb878d12bb709bb4c77abb5a7b3b9c9d51b96c Mon Sep 17 00:00:00 2001 From: chachaleo Date: Wed, 6 Mar 2024 13:08:47 +0100 Subject: [PATCH 09/68] feat: qlinear conv --- docs/SUMMARY.md | 1 + docs/framework/compatibility.md | 1 + 
docs/framework/operators/tensor/README.md | 1 + .../operators/tensor/tensor.qlinear_conv.md | 159 +++++++++++++++ nodegen/node/conv.py | 3 + nodegen/node/qlinear_conv.py | 101 ++++++++++ src/operators/nn.cairo | 3 + src/operators/nn/common.cairo | 14 ++ src/operators/nn/core.cairo | 3 +- src/operators/nn/functional/conv.cairo | 22 +-- .../nn/implementations/nn_fp16x16.cairo | 3 +- .../nn/implementations/nn_fp32x32.cairo | 3 +- .../nn/implementations/nn_fp64x64.cairo | 3 +- .../nn/implementations/nn_fp8x23.cairo | 4 +- src/operators/nn/implementations/nn_i32.cairo | 3 +- src/operators/nn/implementations/nn_i8.cairo | 3 +- src/operators/nn/implementations/nn_u32.cairo | 3 +- src/operators/tensor/core.cairo | 181 ++++++++++++++++++ .../tensor/implementations/tensor_bool.cairo | 21 ++ .../implementations/tensor_complex64.cairo | 22 +++ .../implementations/tensor_fp16x16.cairo | 40 ++++ .../implementations/tensor_fp16x16wide.cairo | 21 ++ .../implementations/tensor_fp32x32.cairo | 40 ++++ .../implementations/tensor_fp64x64.cairo | 40 ++++ .../implementations/tensor_fp8x23.cairo | 40 ++++ .../implementations/tensor_fp8x23wide.cairo | 21 ++ .../tensor/implementations/tensor_i32.cairo | 40 ++++ .../tensor/implementations/tensor_i8.cairo | 40 ++++ .../tensor/implementations/tensor_u32.cairo | 21 ++ src/operators/tensor/quantization.cairo | 1 + .../tensor/quantization/qlinear_conv.cairo | 139 ++++++++++++++ tests/nodes.cairo | 1 + tests/nodes/qlinear_conv.cairo | 46 +++++ tests/nodes/qlinear_conv/input_0.cairo | 24 +++ tests/nodes/qlinear_conv/input_1.cairo | 16 ++ tests/nodes/qlinear_conv/input_2.cairo | 18 ++ tests/nodes/qlinear_conv/output_0.cairo | 24 +++ 37 files changed, 1099 insertions(+), 27 deletions(-) create mode 100644 docs/framework/operators/tensor/tensor.qlinear_conv.md create mode 100644 nodegen/node/qlinear_conv.py create mode 100644 src/operators/nn/common.cairo create mode 100644 src/operators/tensor/quantization/qlinear_conv.cairo create mode 100644 
tests/nodes/qlinear_conv.cairo create mode 100644 tests/nodes/qlinear_conv/input_0.cairo create mode 100644 tests/nodes/qlinear_conv/input_1.cairo create mode 100644 tests/nodes/qlinear_conv/input_2.cairo create mode 100644 tests/nodes/qlinear_conv/output_0.cairo diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 477601b37..17a09bc0d 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -113,6 +113,7 @@ * [tensor.qlinear\_matmul](framework/operators/tensor/tensor.qlinear\_matmul.md) * [tensor.qlinear\_concat](framework/operators/tensor/tensor.qlinear\_concat.md) * [tensor.qlinear\_leakyrelu](framework/operators/tensor/tensor.qlinear\_leakyrelu.md) + * [tensor.qlinear\_conv](framework/operators/tensor/tensor.qlinear\_conv.md) * [tensor.nonzero](framework/operators/tensor/tensor.nonzero.md) * [tensor.squeeze](framework/operators/tensor/tensor.squeeze.md) * [tensor.unsqueeze](framework/operators/tensor/tensor.unsqueeze.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index f3f84ac3f..725c7a3a3 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -66,6 +66,7 @@ You can see below the list of current supported ONNX Operators: | [QlinearAdd](operators/tensor/tensor.qlinear\_add.md) | :white\_check\_mark: | | [QlinearMul](operators/tensor/tensor.qlinear\_mul.md) | :white\_check\_mark: | | [QLinearLeakyRelu](operators/tensor/tensor.qlinear\_leakyrelu.md) | :white\_check\_mark: | +| [QLinearConv](operators/tensor/tensor.qlinear\_conv_.md) | :white\_check\_mark: | | [Nonzero](operators/tensor/tensor.nonzero.md) | :white\_check\_mark: | | [Squeeze](operators/tensor/tensor.squeeze.md) | :white\_check\_mark: | | [Unsqueeze](operators/tensor/tensor.unsqueeze.md) | :white\_check\_mark: | diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index fe2995096..eba6bc647 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md 
@@ -86,6 +86,7 @@ use orion::operators::tensor::TensorTrait; | [`tensor.qlinear_matmul`](tensor.qlinear\_matmul.md) | Performs the product of two quantized i8 Tensors. | | [`tensor.qlinear_concat`](tensor.qlinear\_concat.md) | Concatenate a list of tensors after dequantizing them with their respective scales and zero_points and returns the quantized result. | | [`tensor.qlinear_leakyrelu`](tensor.qlinear\_leakyrelu.md) | Applies the Leaky Relu operator to a quantized Tensor | +| [`tensor.qlinear_conv`](tensor.qlinear\_conv.md) | Performs convolution on quantized Tensors | | [`tensor.gather`](tensor.gather.md) | Gather entries of the axis dimension of data. | | [`tensor.nonzero`](tensor.nonzero.md) | Produces indices of the elements that are non-zero (in row-major order - by dimension). | | [`tensor.squeeze`](tensor.squeeze.md) | Removes dimensions of size 1 from the shape of a tensor. | diff --git a/docs/framework/operators/tensor/tensor.qlinear_conv.md b/docs/framework/operators/tensor/tensor.qlinear_conv.md new file mode 100644 index 000000000..b800674c7 --- /dev/null +++ b/docs/framework/operators/tensor/tensor.qlinear_conv.md @@ -0,0 +1,159 @@ +# tensor.qlinear_conv + +```rust + +qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, +) -> Tensor +``` + +Performs convolution on quantized Tensors + +The convolution operator consumes a quantized input tensor, its scale and zero point, a quantized filter, its scale and zero point, +and output's scale and zero point, and computes the quantized output. Each scale and zero-point pair must have same shape. +It means they must be either scalars (per tensor) or 1-D tensors (per output channel). Each input or output and its related zero point must have same type. 
+ +## Args + +* `X`(`@Tensor`) - Quantized input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). +* `X_scale`(`@Tensor`) - Scale for input `X`. +* `X_zero_point`(`@Tensor`) - Zero point for input `X`. +* `W`(`@Tensor`) - Quantized weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. +* `W_scale`(`@Tensor`) - Scale for input `W`. +* `W_zero_point`(`@Tensor`) - Zero point for input `W`. +* `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0. +* `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. +* `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. +* `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. +* `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. +* `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. 
`pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. +* `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. +* `y_scale`(`@Tensor`) - Scale for output. +* `y_zero_point`(`@Tensor`) - Zero point for output. + +## Returns + +A new `Tensor`, containing the quantized result of the convolution of the dequantized inputs. + +## Type Constraints + +u32 tensor, not supported. +fp8x23wide tensor, not supported. +fp16x16wide tensor, not supported. + +## Example + +```rust + use orion::operators::tensor::{TensorTrait, Tensor}; + use orion::operators::tensor::I8TensorPartialEq; + use orion::utils::{assert_eq, assert_seq_eq}; + use orion::operators::tensor::{I8Tensor, I8TensorAdd}; + use orion::operators::tensor::FP16x16TensorPartialEq; + use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; + use core::array::{ArrayTrait, SpanTrait}; + use orion::operators::tensor::implementations::tensor_fp16x16::{TensorI8IntoTensorFP16x16, FP16x16TensorSub,FP16x16TensorDiv,FP16x16TensorMul}; + use orion::numbers::{FP16x16, I8IntoFP16x16}; + + fn qlinear_conv_example() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(1); + shape.append(1); + + let mut data = ArrayTrait::new(); + 
data.append(0_i8); + let mut W = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 26214, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + let mut param = TensorTrait::new(shape.span(), data.span()); + + let X_scale = TensorTrait::new( + shape: array![1].span(), data: array![*param.data.at(0)].span(), + ); + let X_zero_point = TensorTrait::new( + shape: array![1].span(), data: array![*param.data.at(1)].span(), + ); + let W_scale = TensorTrait::new( + shape: array![1].span(), data: array![*param.data.at(2)].span(), + ); + let W_zero_point = TensorTrait::new( + shape: array![1].span(), data: array![*param.data.at(3)].span(), + ); + let y_scale = TensorTrait::new( + shape: array![1].span(), data: array![*param.data.at(4)].span(), + ); + let y_zero_point = TensorTrait::new( + shape: array![1].span(), data: array![*param.data.at(5)].span(), + ); + + return X + .qlinear_conv( + @X_scale, + @X_zero_point, + @W, + @W_scale, + @W_zero_point, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + @y_scale, + @y_zero_point, + ); + } + +>>> [ + [ + [ + [ 7, 4, 1], + [ -2, -5, -8], + [-11, -14, -17], + ] + ] + ] +``` diff --git a/nodegen/node/conv.py b/nodegen/node/conv.py index 3b05b621d..d1d75430e 100644 --- a/nodegen/node/conv.py +++ b/nodegen/node/conv.py @@ -37,6 +37,9 @@ def conv( pads = [0 for s in X.shape[2:]] * 2 if strides is None: strides = [1 for s in X.shape[2:]] + + if group is None: + group=1 if X.shape[1] != W.shape[1] * group or W.shape[0] % group != 0: raise ValueError( diff --git a/nodegen/node/qlinear_conv.py b/nodegen/node/qlinear_conv.py new file mode 
100644 index 000000000..3bdee0819 --- /dev/null +++ b/nodegen/node/qlinear_conv.py @@ -0,0 +1,101 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl +from .conv import conv + + +def qlinear_conv( + x, + x_scale, + x_zero_point, + w, + w_scale, + w_zero_point, + y_scale, + y_zero_point, + B=None, + auto_pad=None, + dilations=None, + group=None, + kernel_shape=None, + pads=None, + strides=None, +): + X = x.astype(np.int32) + if x_zero_point is not None: + X -= x_zero_point + W = w.astype(np.int32) + if w_zero_point is not None: + if len(w_zero_point.shape) == 1 and w_zero_point.shape[0] == W.shape[0]: + missing = (w_zero_point.shape[0],) + (1,) * (len(W.shape) - 1) + W -= w_zero_point.reshape(missing) + else: + W -= w_zero_point + res = conv( + X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides + ).astype(np.int32) + R = res * (x_scale * w_scale / y_scale) + if y_zero_point is not None: + R += y_zero_point + if y_zero_point.dtype == np.int8: + R = np.clip(R, -128, 127) + else: + R = np.clip(R, 0, 255) + return (np.rint(R).astype(y_zero_point.dtype),) + if x.dtype == np.int8: + R = np.clip(R, -128, 127) + else: + R = np.clip(R, 0, 255) + return (np.rint(R).astype(x.dtype),) + + +class Qlinear_conv(RunAll): + @staticmethod + def export_qlinear_conv() -> None: + x = np.array([ + [1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + + dtype=np.int8, + ).reshape((1, 1, 3, 3)) + x_scale = np.float32(0.5) + x_zero_point = np.int8(2) + + w = np.array([0], dtype=np.int8).reshape((1, 1, 1, 1)) + w_scale = np.array([0.4], dtype=np.float32) + w_zero_point = np.array([3], dtype=np.int8) + + y_scale = np.float32(0.2) + y_zero_point = np.int8(4) + + param = np.array([0.5, 2, 0.4, 3, 0.2, 4]) + + y = qlinear_conv(x,x_scale,x_zero_point,w,w_scale,w_zero_point,y_scale,y_zero_point,) + y = np.array(y) + + x = Tensor(Dtype.I8, x.shape, x.flatten()) + w = Tensor(Dtype.I8, w.shape, w.flatten()) + y = Tensor(Dtype.I8, 
y.shape, y.flatten()) + param = Tensor(Dtype.FP16x16, param.shape, to_fp(param.flatten(), FixedImpl.FP16x16)) + + + name = "qlinear_conv" + func_sig = "qlinear_conv(" + func_sig += "@input_0," + func_sig += "@TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(0)].span(),)," + func_sig += "@TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(1)].span(),)," + func_sig += "@input_1," + func_sig += "@TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(2)].span(),)," + func_sig += "@TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(3)].span(),)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "@TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(4)].span(),)," + func_sig += "@TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(5)].span(),))" + make_test( + [x, w, param], y, func_sig, name) diff --git a/src/operators/nn.cairo b/src/operators/nn.cairo index 625e63216..42755a102 100644 --- a/src/operators/nn.cairo +++ b/src/operators/nn.cairo @@ -1,6 +1,9 @@ mod core; mod implementations; mod functional; +mod common; + +use orion::operators::nn::common::{AUTO_PAD, POOLING_TYPE}; use orion::operators::nn::core::NNTrait; diff --git a/src/operators/nn/common.cairo b/src/operators/nn/common.cairo new file mode 100644 index 000000000..d10ad8430 --- /dev/null +++ b/src/operators/nn/common.cairo @@ -0,0 +1,14 @@ +#[derive(Copy, Drop)] +enum AUTO_PAD { + NOTSET, + SAME_UPPER, + SAME_LOWER, + VALID +} + +#[derive(Copy, Drop)] +enum POOLING_TYPE { + AVG, + LPPOOL, + MAX, +} diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 35d318b28..8942c7b87 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -1,4 +1,5 @@ use 
orion::operators::tensor::core::Tensor; +use orion::operators::nn::AUTO_PAD; /// Trait /// @@ -946,7 +947,7 @@ trait NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index ac72c336d..0dcea51f2 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -1,30 +1,14 @@ -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; use orion::operators::tensor::core::{stride}; -#[derive(Copy, Drop)] -enum AUTO_PAD { - NOTSET, - SAME_UPPER, - SAME_LOWER, - VALID -} +use orion::operators::nn::AUTO_PAD; + fn conv< - T, - MAG, - +TensorTrait, - +NumberTrait, - +Copy, - +Drop, - +Add, - +Mul, - +AddEq, - +PrintTrait, + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +Mul, +AddEq, >( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 1c018ade3..7c1218d78 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -11,6 +11,7 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ use orion::operators::tensor::implementations::tensor_fp16x16wide::{ FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd }; +use orion::operators::nn::AUTO_PAD; impl FP16x16NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -134,7 +135,7 @@ impl FP16x16NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo 
b/src/operators/nn/implementations/nn_fp32x32.cairo index a5725eccb..b4ea1e132 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -5,6 +5,7 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x use orion::operators::tensor::implementations::tensor_fp32x32::{ FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd }; +use orion::operators::nn::AUTO_PAD; impl FP32x32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -128,7 +129,7 @@ impl FP32x32NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index 01a3b30ad..c7dac0072 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -5,6 +5,7 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x use orion::operators::tensor::implementations::tensor_fp64x64::{ FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd }; +use orion::operators::nn::AUTO_PAD; impl FP64x64NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -128,7 +129,7 @@ impl FP64x64NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index d80d2c323..3a5afab88 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -9,6 +9,8 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W }; use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor}; +use orion::operators::nn::AUTO_PAD; + impl 
FP8x23NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -130,7 +132,7 @@ impl FP8x23NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 29a94d288..bdde4fb3b 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -2,6 +2,7 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd}; +use orion::operators::nn::AUTO_PAD; impl I32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -121,7 +122,7 @@ impl I32NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e22de6b43..c6d718f71 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -2,6 +2,7 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; +use orion::operators::nn::AUTO_PAD; impl I8NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -121,7 +122,7 @@ impl I8NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 7352b7ad9..1104239f4 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -2,6 +2,7 @@ use 
orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; +use orion::operators::nn::AUTO_PAD; impl U32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -121,7 +122,7 @@ impl U32NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 0d21a4de3..39ce5eb04 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -9,6 +9,8 @@ use alexandria_data_structures::array_ext::{SpanTraitExt}; use orion::operators::tensor::helpers::{len_from_shape, check_shape}; use orion::numbers::{NumberTrait, I32IntoU32, U32IntoI32}; +use orion::operators::nn::AUTO_PAD; + #[derive(Copy, Drop)] struct Tensor { shape: Span, @@ -84,6 +86,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { fn qlinear_leakyrelu( self: @Tensor, a_scale: @Tensor, a_zero_point: @Tensor, alpha: T ) -> Tensor::; + /// # tensor.qlinear_conv + /// + /// ```rust + /// + /// qlinear_conv( + /// self: @Tensor, + /// X_scale: @Tensor, + /// X_zero_point: @Tensor, + /// W: @Tensor, + /// W_scale: @Tensor, + /// W_zero_point: @Tensor, + /// B: Option>, + /// auto_pad: Option, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// pads: Option>, + /// strides: Option>, + /// y_scale: @Tensor, + /// y_zero_point: @Tensor, + /// ) -> Tensor + /// ``` + /// + /// Performs convolution on quantized Tensors + /// + /// The convolution operator consumes a quantized input tensor, its scale and zero point, a quantized filter, its scale and zero point, + /// and output's scale and zero point, and computes the quantized output. Each scale and zero-point pair must have same shape. 
+ /// It means they must be either scalars (per tensor) or 1-D tensors (per output channel). Each input or output and its related zero point must have same type. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - Quantized input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). + /// * `X_scale`(`@Tensor`) - Scale for input `X`. + /// * `X_zero_point`(`@Tensor`) - Zero point for input `X`. + /// * `W`(`@Tensor`) - Quantized weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. + /// * `W_scale`(`@Tensor`) - Scale for input `W`. + /// * `W_zero_point`(`@Tensor`) - Zero point for input `W`. + /// * `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0. + /// * `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. + /// * `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. + /// * `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. + /// * `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. 
+ /// * `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represents the number of pixels added to the beginning and end part of the corresponding axis. `pads` format should be as follows [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin is the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with the auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. + /// * `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. + /// * `y_scale`(`@Tensor`) - Scale for output. + /// * `y_zero_point`(`@Tensor`) - Zero point for output. + /// + /// ## Returns + /// + /// A new `Tensor`, containing the quantized result of the convolution of the dequantized inputs. + /// + /// ## Type Constraints + /// + /// u32 tensor, not supported. + /// fp8x23wide tensor, not supported. + /// fp16x16wide tensor, not supported.
+ /// + /// ## Example + /// + /// ```rust + /// use orion::operators::tensor::{TensorTrait, Tensor}; + /// use orion::operators::tensor::I8TensorPartialEq; + /// use orion::utils::{assert_eq, assert_seq_eq}; + /// use orion::operators::tensor::{I8Tensor, I8TensorAdd}; + /// use orion::operators::tensor::FP16x16TensorPartialEq; + /// use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; + /// use core::array::{ArrayTrait, SpanTrait}; + /// use orion::operators::tensor::implementations::tensor_fp16x16::{TensorI8IntoTensorFP16x16, FP16x16TensorSub,FP16x16TensorDiv,FP16x16TensorMul}; + /// use orion::numbers::{FP16x16, I8IntoFP16x16}; + /// + /// fn qlinear_conv_example() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(3); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(1); + /// data.append(2); + /// data.append(3); + /// data.append(4); + /// data.append(5); + /// data.append(6); + /// data.append(7); + /// data.append(8); + /// data.append(9); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(1); + /// shape.append(1); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(0_i8); + /// let mut W = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(6); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 32768, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 26214, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 13107, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// let mut param = TensorTrait::new(shape.span(), data.span()); + /// + /// let X_scale = TensorTrait::new( + /// shape: 
array![1].span(), data: array![*param.data.at(0)].span(), + /// ); + /// let X_zero_point = TensorTrait::new( + /// shape: array![1].span(), data: array![*param.data.at(1)].span(), + /// ); + /// let W_scale = TensorTrait::new( + /// shape: array![1].span(), data: array![*param.data.at(2)].span(), + /// ); + /// let W_zero_point = TensorTrait::new( + /// shape: array![1].span(), data: array![*param.data.at(3)].span(), + /// ); + /// let y_scale = TensorTrait::new( + /// shape: array![1].span(), data: array![*param.data.at(4)].span(), + /// ); + /// let y_zero_point = TensorTrait::new( + /// shape: array![1].span(), data: array![*param.data.at(5)].span(), + /// ); + /// + /// return X + /// .qlinear_conv( + /// @X_scale, + /// @X_zero_point, + /// @W, + /// @W_scale, + /// @W_zero_point, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// @y_scale, + /// @y_zero_point, + /// ); + /// } + /// + /// >>> [ + /// [ + /// [ + /// [ 7, 4, 1], + /// [ -2, -5, -8], + /// [-11, -14, -17], + /// ] + /// ] + /// ] + /// ``` + /// + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor; /// # tensor.slice /// /// ```rust @@ -3120,6 +3300,7 @@ trait TensorTrait { /// [6 7]] /// ``` /// + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 612a397cc..842464a95 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -6,6 +6,7 @@ use orion::operators::tensor::core::{ use orion::operators::tensor::{math, linalg, quantization, 
core as core_ops, ml, manipulation}; use orion::numbers::{NumberTrait}; use orion::operators::tensor::implementations::tensor_u32::U32Tensor; +use orion::operators::nn::AUTO_PAD; impl BoolTensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -322,6 +323,26 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + panic(array!['not supported!']) + } + fn round(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index c9c31ae23..15a3aed55 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -12,6 +12,8 @@ use orion::operators::tensor::implementations::{ use orion::numbers::complex_number::complex_trait::ComplexTrait; use orion::numbers::complex_number::complex64::{Complex64Impl, complex64}; +use orion::operators::nn::AUTO_PAD; + impl Complex64Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { new_tensor(shape, data) @@ -290,6 +292,26 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + panic(array!['not supported!']) + } + fn slice( self: @Tensor, starts: Span, diff --git 
a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index a37ed0442..5b53dff73 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -12,6 +12,8 @@ use orion::operators::tensor::implementations::{ use orion::numbers::fixed_point::implementations::fp16x16::math::trig::PI; use orion::numbers::fixed_point::implementations::fp16x16wide::core::FP16x16W; +use orion::operators::nn::AUTO_PAD; +use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN; impl FP16x16Tensor of TensorTrait { @@ -338,6 +340,44 @@ impl FP16x16Tensor of TensorTrait { ) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + quantization::qlinear_conv::qlinear_conv( + self, + X_scale, + X_zero_point, + W, + W_scale, + W_zero_point, + B, + auto_pad, + dilations, + group, + kernel_shape, + pads, + strides, + y_scale, + y_zero_point, + NumberTrait::new_unscaled(128, true), + NumberTrait::new_unscaled(127, false) + ) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 2003b28ff..d82722434 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -16,6 +16,7 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ }; use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; +use orion::operators::nn::AUTO_PAD; impl FP16x16WTensor of TensorTrait { @@ -302,6 +303,26 @@ impl FP16x16WTensor of TensorTrait { 
panic(array!['not supported!']) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + panic(array!['not supported!']) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4870226a1..2918331da 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -10,6 +10,8 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::ONE; use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::operators::nn::AUTO_PAD; +use orion::operators::nn::implementations::nn_fp32x32::FP32x32NN; impl FP32x32Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -335,6 +337,44 @@ impl FP32x32Tensor of TensorTrait { ) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + quantization::qlinear_conv::qlinear_conv( + self, + X_scale, + X_zero_point, + W, + W_scale, + W_zero_point, + B, + auto_pad, + dilations, + group, + kernel_shape, + pads, + strides, + y_scale, + y_zero_point, + NumberTrait::new_unscaled(128, true), + NumberTrait::new_unscaled(127, false) + ) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo 
b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 3a7214d18..877a0c22e 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -10,6 +10,8 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::ONE; use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; +use orion::operators::nn::AUTO_PAD; +use orion::operators::nn::implementations::nn_fp64x64::FP64x64NN; impl FP64x64Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -335,6 +337,44 @@ impl FP64x64Tensor of TensorTrait { ) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + quantization::qlinear_conv::qlinear_conv( + self, + X_scale, + X_zero_point, + W, + W_scale, + W_zero_point, + B, + auto_pad, + dilations, + group, + kernel_shape, + pads, + strides, + y_scale, + y_zero_point, + NumberTrait::new_unscaled(128, true), + NumberTrait::new_unscaled(127, false) + ) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index b4a26d749..a4afb879e 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -10,6 +10,8 @@ use orion::operators::tensor::implementations::{ tensor_i8::I8Tensor, tensor_u32::U32Tensor, tensor_bool::BoolTensor }; use orion::numbers::fixed_point::implementations::fp8x23::math::trig::PI; +use orion::operators::nn::AUTO_PAD; +use orion::operators::nn::implementations::nn_fp8x23::FP8x23NN; impl FP8x23Tensor of TensorTrait { fn 
new(shape: Span, data: Span) -> Tensor { @@ -335,6 +337,44 @@ impl FP8x23Tensor of TensorTrait { ) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + quantization::qlinear_conv::qlinear_conv( + self, + X_scale, + X_zero_point, + W, + W_scale, + W_zero_point, + B, + auto_pad, + dilations, + group, + kernel_shape, + pads, + strides, + y_scale, + y_zero_point, + NumberTrait::new_unscaled(128, true), + NumberTrait::new_unscaled(127, false) + ) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 06a297b69..ce0d3ab6c 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -12,6 +12,7 @@ use orion::operators::tensor::implementations::{ use orion::numbers::fixed_point::implementations::fp8x23wide::math::trig::PI; use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; +use orion::operators::nn::AUTO_PAD; impl FP8x23WTensor of TensorTrait { @@ -289,6 +290,26 @@ impl FP8x23WTensor of TensorTrait { panic(array!['not supported!']) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + panic(array!['not supported!']) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_i32.cairo 
b/src/operators/tensor/implementations/tensor_i32.cairo index 296876516..e44495504 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -10,6 +10,8 @@ use orion::numbers::{NumberTrait}; use orion::operators::tensor::implementations::{ tensor_u32::U32Tensor, tensor_i8::I8Tensor, tensor_bool::BoolTensor }; +use orion::operators::nn::AUTO_PAD; +use orion::operators::nn::implementations::nn_i32::I32NN; impl I32Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -330,6 +332,44 @@ impl I32Tensor of TensorTrait { ) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + quantization::qlinear_conv::qlinear_conv( + self, + X_scale, + X_zero_point, + W, + W_scale, + W_zero_point, + B, + auto_pad, + dilations, + group, + kernel_shape, + pads, + strides, + y_scale, + y_zero_point, + NumberTrait::new_unscaled(128, true), + NumberTrait::new_unscaled(127, false) + ) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 42d807c68..68272b659 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -8,6 +8,8 @@ use orion::operators::tensor::core::{ use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation}; use orion::numbers::{NumberTrait}; use orion::operators::tensor::implementations::{tensor_u32::U32Tensor, tensor_bool::BoolTensor}; +use orion::operators::nn::AUTO_PAD; +use orion::operators::nn::implementations::nn_i8::I8NN; impl I8Tensor of TensorTrait { fn new(shape: Span, data: 
Span) -> Tensor { @@ -333,6 +335,44 @@ impl I8Tensor of TensorTrait { ) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + quantization::qlinear_conv::qlinear_conv( + self, + X_scale, + X_zero_point, + W, + W_scale, + W_zero_point, + B, + auto_pad, + dilations, + group, + kernel_shape, + pads, + strides, + y_scale, + y_zero_point, + NumberTrait::new_unscaled(127, true), + NumberTrait::new_unscaled(127, false) + ) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index efb681a86..d9d8c6fe1 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -7,6 +7,7 @@ use orion::operators::tensor::core::{ use orion::operators::tensor::{math, linalg, quantization, core as core_tensor, ml, manipulation}; use orion::numbers::{NumberTrait}; use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_bool::BoolTensor}; +use orion::operators::nn::AUTO_PAD; impl U32Tensor of TensorTrait { fn new(shape: Span, data: Span) -> Tensor { @@ -277,6 +278,26 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } + fn qlinear_conv( + self: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + ) -> Tensor { + panic(array!['not supported!']) + } + fn slice( self: @Tensor, starts: Span, diff --git a/src/operators/tensor/quantization.cairo 
b/src/operators/tensor/quantization.cairo index 4f56fc5cd..29fc85a32 100644 --- a/src/operators/tensor/quantization.cairo +++ b/src/operators/tensor/quantization.cairo @@ -6,3 +6,4 @@ mod qlinear_concat; mod qlinear_add; mod qlinear_mul; mod qlinear_leakyrelu; +mod qlinear_conv; diff --git a/src/operators/tensor/quantization/qlinear_conv.cairo b/src/operators/tensor/quantization/qlinear_conv.cairo new file mode 100644 index 000000000..d4c276825 --- /dev/null +++ b/src/operators/tensor/quantization/qlinear_conv.cairo @@ -0,0 +1,139 @@ +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor}; + +use orion::operators::tensor::quantization::dequantize_linear::dequantize_linear; +use orion::operators::tensor::quantization::quantize_linear::quantize_linear; +use orion::operators::nn::{NNTrait}; +use orion::operators::nn::AUTO_PAD; +//use orion::operators::nn::functional::conv::conv; + +/// # tensor.qlinear_conv +/// +/// ```rust +/// +/// qlinear_conv( +/// X: @Tensor, +/// X_scale: @Tensor, +/// X_zero_point: @Tensor, +/// W: @Tensor, +/// W_scale: @Tensor, +/// W_zero_point: @Tensor, +/// B: Option>, +/// auto_pad: Option, +/// dilations: Option>, +/// group: Option, +/// kernel_shape: Option>, +/// pads: Option>, +/// strides: Option>, +/// y_scale: @Tensor, +/// y_zero_point: @Tensor, +/// ) -> Tensor +/// ``` +/// +/// Performs convolution on quantized Tensors +/// +/// The convolution operator consumes a quantized input tensor, its scale and zero point, a quantized filter, its scale and zero point, +/// and output's scale and zero point, and computes the quantized output. Each scale and zero-point pair must have same shape. +/// It means they must be either scalars (per tensor) or 1-D tensors (per output channel). Each input or output and its related zero point must have same type. 
+/// +/// ## Args +/// +/// * `X`(`@Tensor`) - Quantized input data tensor, has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the 2D image. Otherwise the size is (N x C x D1 x D2 ... x Dn). +/// * `X_scale`(`@Tensor`) - Scale for input `X`. +/// * `X_zero_point`(`@Tensor`) - Zero point for input `X`. +/// * `W`(`@Tensor`) - Quantized weight tensor that will be used in the convolutions; has size (M x C/group x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel, and M is the number of feature maps. For more than 2 dimensions, the kernel shape will be (M x C/group x k1 x k2 x ... x kn), where (k1 x k2 x ... kn) is the dimension of the kernel. +/// * `W_scale`(`@Tensor`) - Scale for input `W`. +/// * `W_zero_point`(`@Tensor`) - Zero point for input `W`. +/// * `B`(`Option<@Tensor>`) - Optional 1D bias to be added to the convolution, has size of M. Bias must be quantized using scale = x_scale * w_scale and zero_point = 0. +/// * `auto_pad`(`Option`) - Default is NOTSET, auto_pad must be either NOTSET, SAME_UPPER, SAME_LOWER or VALID. NOTSET means explicit padding is used. SAME_UPPER or SAME_LOWER mean pad the input so that `output_shape[i] = ceil(input_shape[i] / strides[i])` for each axis `i`. +/// * `dilations`(`Option>`) - Dilation value along each spatial axis of the filter. If not present, the dilation defaults to 1 along each spatial axis. +/// * `group`(`Option`) - Default is 1, number of groups input channels and output channels are divided into. +/// * `kernel_shape`(`Option>`) - The shape of the convolution kernel. If not present, should be inferred from input W. +/// * `pads`(`Option>`) - Padding for the beginning and ending along each spatial axis, it can take any value greater than or equal to 0. The value represent the number of pixels added to the beginning and end part of the corresponding axis. 
`pads` format should be as follow [x1_begin, x2_begin...x1_end, x2_end,...], where xi_begin the number of pixels added at the beginning of axis `i` and xi_end, the number of pixels added at the end of axis `i`. This attribute cannot be used simultaneously with auto_pad attribute. If not present, the padding defaults to 0 along start and end of each spatial axis. +/// * `strides`(`Option>`) - Stride along each spatial axis. If not present, the stride defaults to 1 along each spatial axis. +/// * `y_scale`(`@Tensor`) - Scale for output. +/// * `y_zero_point`(`@Tensor`) - Zero point for output. +/// +/// ## Returns +/// +/// A new `Tensor`, containing the quantized result of the convolution of the dequantized inputs. +/// +/// ## Type Constraints +/// +/// u32 tensor, not supported. +/// fp8x23wide tensor, not supported. +/// fp16x16wide tensor, not supported. +/// +/// ## Example +/// +/// ```rust + +/// ``` +/// + +fn qlinear_conv< + T, + MAG, + Q, + impl TTensor: TensorTrait, + impl QTensor: TensorTrait, + impl QIntoT: Into, + impl QTensorIntoTTensor: Into, Tensor>, + impl TAdd: Add, + impl TSub: Sub, + impl TDiv: Div, + impl TMul: Mul, + impl TTensorMul: Mul>, + impl TTensorSub: Sub>, + impl TTensorDiv: Div>, + impl TPartialOrd: PartialOrd, + impl TNumber: NumberTrait, + impl TTryInto: TryInto, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop, + impl QCopy: Copy, + impl QDrop: Drop, + +NNTrait, +>( + X: @Tensor, + X_scale: @Tensor, + X_zero_point: @Tensor, + W: @Tensor, + W_scale: @Tensor, + W_zero_point: @Tensor, + B: Option>, + auto_pad: Option, + dilations: Option>, + group: Option, + kernel_shape: Option>, + pads: Option>, + strides: Option>, + y_scale: @Tensor, + y_zero_point: @Tensor, + min: T, + max: T +) -> Tensor { + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + let mut dequantized_X = dequantize_linear(@(*X), X_scale, X_zero_point); + let mut dequantized_W = dequantize_linear(@(*W), W_scale, W_zero_point); + let B = match B { 
+ Option::Some(B) => { + Option::Some( + dequantize_linear( + @TensorTrait::new(array![B.len()].span(), B), + @(*X_scale * *W_scale), + @TensorTrait::new(array![1].span(), array![NumberTrait::::zero()].span()) + ) + .data + ) + }, + Option::None => { Option::None } + }; + + let mut y = NNTrait::conv( + @dequantized_X, @dequantized_W, B, auto_pad, dilations, group, kernel_shape, pads, strides + ); + + return (quantize_linear(@y, @(*y_scale), y_zero_point, min, max)); +} diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 29bebb762..06bcff8f9 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1047,3 +1047,4 @@ mod label_encoder_fp8x23_default; mod label_encoder_i8_default; mod label_encoder_i32_default; mod label_encoder_u32_default; +mod qlinear_conv; diff --git a/tests/nodes/qlinear_conv.cairo b/tests/nodes/qlinear_conv.cairo new file mode 100644 index 000000000..dc7b580dd --- /dev/null +++ b/tests/nodes/qlinear_conv.cairo @@ -0,0 +1,46 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::implementations::tensor_fp16x16::{ + TensorI8IntoTensorFP16x16, FP16x16TensorSub, FP16x16TensorDiv, FP16x16TensorMul +}; +use orion::numbers::{I8IntoFP16x16}; + +#[test] +#[available_gas(2000000000)] +fn test_qlinear_conv() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = FP16x16Tensor::qlinear_conv( + @input_0, + @TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(0)].span(),), + @TensorTrait::new(shape: array![1].span(), data: 
array![*input_2.data.at(1)].span(),), + @input_1, + @TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(2)].span(),), + @TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(3)].span(),), + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + Option::None, + @TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(4)].span(),), + @TensorTrait::new(shape: array![1].span(), data: array![*input_2.data.at(5)].span(),) + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/qlinear_conv/input_0.cairo b/tests/nodes/qlinear_conv/input_0.cairo new file mode 100644 index 000000000..677366ad8 --- /dev/null +++ b/tests/nodes/qlinear_conv/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/qlinear_conv/input_1.cairo b/tests/nodes/qlinear_conv/input_1.cairo new file mode 100644 index 000000000..4066a17fa --- /dev/null +++ b/tests/nodes/qlinear_conv/input_1.cairo @@ -0,0 +1,16 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(1); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} 
diff --git a/tests/nodes/qlinear_conv/input_2.cairo b/tests/nodes/qlinear_conv/input_2.cairo new file mode 100644 index 000000000..02fb4d2ec --- /dev/null +++ b/tests/nodes/qlinear_conv/input_2.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 26214, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/qlinear_conv/output_0.cairo b/tests/nodes/qlinear_conv/output_0.cairo new file mode 100644 index 000000000..98e542ec7 --- /dev/null +++ b/tests/nodes/qlinear_conv/output_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(7); + data.append(4); + data.append(1); + data.append(-2); + data.append(-5); + data.append(-8); + data.append(-11); + data.append(-14); + data.append(-17); + TensorTrait::new(shape.span(), data.span()) +} From 05f4c9616361d2337bc8fb97268b143fcf3ba5d0 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Thu, 7 Mar 2024 10:17:22 +0100 Subject: [PATCH 10/68] refactor nn: helpers and enum --- src/operators/nn.cairo | 4 + src/operators/nn/common.cairo | 28 ++ src/operators/nn/core.cairo | 
9 +- src/operators/nn/functional/col2im.cairo | 75 +---- src/operators/nn/functional/conv.cairo | 184 +---------- .../nn/functional/conv_transpose.cairo | 205 +----------- src/operators/nn/functional/grid_sample.cairo | 252 +------------- src/operators/nn/helpers.cairo | 310 ++++++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 5 +- .../nn/implementations/nn_fp32x32.cairo | 5 +- .../nn/implementations/nn_fp64x64.cairo | 5 +- .../nn/implementations/nn_fp8x23.cairo | 9 +- src/operators/nn/implementations/nn_i32.cairo | 5 +- src/operators/nn/implementations/nn_i8.cairo | 5 +- src/operators/nn/implementations/nn_u32.cairo | 5 +- src/operators/tensor/math/resize.cairo | 93 +----- tests/nodes/conv_2D_with_autopad_same.cairo | 2 +- tests/nodes/grid_sample_cubic.cairo | 2 +- tests/nodes/grid_sample_nearest.cairo | 2 +- .../grid_sample_nearest_aligncorner.cairo | 2 +- tests/nodes/grid_sample_padding_border.cairo | 2 +- .../grid_sample_padding_reflection.cairo | 2 +- tests/nodes/grid_sample_padding_zeros.cairo | 2 +- 23 files changed, 421 insertions(+), 792 deletions(-) create mode 100644 src/operators/nn/common.cairo create mode 100644 src/operators/nn/helpers.cairo diff --git a/src/operators/nn.cairo b/src/operators/nn.cairo index 625e63216..376dd757f 100644 --- a/src/operators/nn.cairo +++ b/src/operators/nn.cairo @@ -1,6 +1,10 @@ mod core; mod implementations; mod functional; +mod common; +mod helpers; + +use orion::operators::nn::common::{AUTO_PAD, MODE, PADDING_MODE}; use orion::operators::nn::core::NNTrait; diff --git a/src/operators/nn/common.cairo b/src/operators/nn/common.cairo new file mode 100644 index 000000000..2c20ff794 --- /dev/null +++ b/src/operators/nn/common.cairo @@ -0,0 +1,28 @@ +#[derive(Copy, Drop)] +enum AUTO_PAD { + NOTSET, + SAME_UPPER, + SAME_LOWER, + VALID +} + +#[derive(Copy, Drop)] +enum POOLING_TYPE { + AVG, + LPPOOL, + MAX, +} + +#[derive(Copy, Drop)] +enum MODE { + NEAREST, + LINEAR, + CUBIC, +} + +#[derive(Copy, Drop)] +enum 
PADDING_MODE { + ZEROS, + BORDER, + REFLECTION, +} diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 35d318b28..baa125e17 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -1,4 +1,5 @@ use orion::operators::tensor::core::Tensor; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; /// Trait /// @@ -946,7 +947,7 @@ trait NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, @@ -1086,7 +1087,7 @@ trait NNTrait { X: @Tensor, W: @Tensor, B: Option<@Tensor>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, @@ -1301,7 +1302,7 @@ trait NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor; } diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index 4f9cfc1a8..bab67a376 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -1,9 +1,10 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::core::{stride}; +use orion::operators::tensor::core::{stride, unravel_index}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::nn::helpers::{is_out, prod}; -fn col2im, +NumberTrait, +Copy, +Drop, +Add, +Mul,>( +fn col2im, +NumberTrait, +Copy, +Drop, +Add, +MulEq,>( data: @Tensor, image_shape: Span, block_shape: Span, @@ -53,7 +54,7 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad }, }; - let bl = prod(block_shape, 0); + let bl = prod(block_shape); let C = *(*data).shape.at(1) / bl; let mut new_shape = array![*(*data).shape.at(0), C, bl]; @@ -148,15 +149,15 @@ fn col2im_naive_implementation< let mut data_im = NullableVecImpl::new(); data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, 
NumberTrait::zero()); - let kernel_size = prod(kernel_shape, 0); - let col_size = prod(dim_col, 0); + let kernel_size = prod(kernel_shape); + let col_size = prod(dim_col); let mut c_col = 0; while c_col != kernel_size { - let offset = get_indices(c_col, kernel_shape).span(); + let offset = unravel_index(c_col, kernel_shape); let mut col = 0; while col != col_size { - let ind_col = get_indices(col, dim_col).span(); + let ind_col = unravel_index(col, dim_col); let mut ind_im: Array = array![]; let mut i = 0; while i != n_dims { @@ -208,7 +209,7 @@ fn col2im_shape_check, +Copy, +Drop,>( ) { let n_input_plane = *(*X).shape.at(0); - let kernel_size = prod(kernel_shape, 0); + let kernel_size = prod(kernel_shape); assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); @@ -230,63 +231,7 @@ fn col2im_shape_check, +Copy, +Drop,>( i += 1; }; - let block_size = prod(n_blocks.span(), 0); + let block_size = prod(n_blocks.span()); assert(input_length == block_size, 'input_length != block_size'); } - -fn get_indices(index: usize, shape: Span,) -> Array { - let mut i = index; - let mut res: Array = array![]; - let mut k = shape.len() - 1; - while k != 0 { - let m = i % *shape.at(k); - res.append(m); - i -= m; - i /= *shape.at(k); - k -= 1; - }; - - let mut new_res: Array = array![]; - new_res.append(i); - let mut i = shape.len() - 1; - while i != 0 { - new_res.append(*res.at(i - 1)); - i -= 1; - }; - - new_res -} - -fn is_out(ind: Span, shape: Span,) -> bool { - let mut n = 0; - let is_out = loop { - if n == ind.len() { - break false; - } - let s = *shape.at(n); - let i = *ind.at(n); - if i < 0 { - break true; - } - if i >= s { - break true; - } - n += 1; - }; - - is_out -} - -fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize -) -> T { - let mut i = start; - let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); - i += 1; - }; - - prod -} diff --git a/src/operators/nn/functional/conv.cairo 
b/src/operators/nn/functional/conv.cairo index ac72c336d..72b41be74 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -1,30 +1,13 @@ -use core::debug::PrintTrait; - use orion::numbers::NumberTrait; use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; use orion::operators::tensor::core::{stride}; - -#[derive(Copy, Drop)] -enum AUTO_PAD { - NOTSET, - SAME_UPPER, - SAME_LOWER, - VALID -} +use orion::operators::nn::helpers::{cartesian, arange, max_in_tensor, min_in_tensor, dot}; +use orion::operators::nn::AUTO_PAD; fn conv< - T, - MAG, - +TensorTrait, - +NumberTrait, - +Copy, - +Drop, - +Add, - +Mul, - +AddEq, - +PrintTrait, + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +Mul, +AddEq, >( X: @Tensor, W: @Tensor, @@ -245,7 +228,8 @@ fn conv< } // group == 1 - if *dilations.at(0) != 1 || min(dilations.clone()) != max(dilations.clone()) { + if *dilations.at(0) != 1 + || min_in_tensor(dilations.clone()) != min_in_tensor(dilations.clone()) { // computation of the dilated kernel let nd = dilations.len(); let mut new_kernel_shape: Array = array![]; @@ -1228,161 +1212,3 @@ fn r_index_check(r_index: Span, shape_out: Span) -> bool { flag } -fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize -) -> T { - let mut i = start; - let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); - i += 1; - }; - - prod -} - -fn min(mut a: Span) -> usize { - assert(a.len() > 0, 'span cannot be empty'); - - let mut min = *a.at(0); - loop { - match a.pop_front() { - Option::Some(v) => { if *v < min { - min = *v; - }; }, - Option::None => { break min; } - }; - } -} - -fn max(mut a: Span) -> usize { - assert(a.len() > 0, 'span cannot be empty'); - - let mut max = *a.at(0); - loop { - match a.pop_front() { - Option::Some(v) => { if *v > max { - max = 
*v; - }; }, - Option::None => { break max; } - }; - } -} - -fn arange(start: usize, end: usize, step: usize) -> Span { - assert((end - start) % step == 0, 'incompatible step value'); - - let mut arr: Array = array![]; - let mut i = start; - while i < end { - arr.append(i); - i += step; - }; - - arr.span() -} - - -fn cartesian(mut arrays: Span>,) -> Span> { - let mut n = 1; - let mut i = arrays.len() - 1; - loop { - n = n * (*(arrays.at(i))).len(); - if i == 0 { - break; - } - i -= 1; - }; - - let mut i = 0; - let mut size_arrays: Array = array![]; - while i != arrays.len() { - size_arrays.append((*(arrays.at(i))).len()); - i += 1; - }; - - let size_arrays = size_arrays.span(); - let mut output_arrays = array![]; - let mut m = n; - - let mut i = 0; - while i != arrays.len() { - m = m / (*(arrays.at(i))).len(); - let mut out = repeat(*(arrays.at(i)), m); - out = repeat_2(out, size_arrays, i); - - output_arrays.append(out); - i += 1; - }; - - let output_arrays = output_arrays.span(); - - let mut i = 0; - let mut ret = ArrayTrait::new(); - while i != n { - let mut j = 0; - let mut x: Array = array![]; - while j != arrays.len() { - x.append(*(output_arrays.at(j)).at(i)); - j += 1; - }; - - ret.append(x.span()); - i += 1; - }; - - ret.span() -} - -fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { - let mut size = array.len(); - let mut i = 0; - while i != index { - let mut j = 1; - while j != *size_array.at(index - 1 - i) { - let mut k = 0; - while k != size { - array.append(*array.at(k)); - k += 1; - }; - - j += 1; - }; - - size = size * *size_array.at(index - 1 - i); - i += 1; - }; - - array -} - -fn repeat(array: Span, m: usize,) -> Array { - let mut out: Array = array![]; - let mut j = 0; - while j != array.len() { - let mut k = 0; - while k != m { - out.append(*array.at(j)); - k += 1; - }; - - j += 1; - }; - - out -} - -fn dot< - T, MAG, +Drop, +Copy, +NumberTrait, +Add, +TensorTrait, +AddEq, +Mul, ->( - a: Span, b: Span -) -> T { - let mut i 
= 0; - let mut sum = NumberTrait::zero(); - while i != a.len() { - sum = sum + *a.at(i) * *b.at(i); - i += 1; - }; - - sum -} diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index f8f810558..8213d40a5 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -2,14 +2,12 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::nn::helpers::{is_out, prod, prod_on_subset}; +use orion::operators::nn::functional::col2im::{ + get_image, col2im_naive_implementation, col2im_shape_check +}; +use orion::operators::nn::AUTO_PAD; -#[derive(Copy, Drop)] -enum AUTO_PAD { - NOTSET, - SAME_UPPER, - SAME_LOWER, - VALID -} fn conv_transpose< T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +Mul, @@ -288,14 +286,14 @@ fn conv_transpose< }; let kernel_shape = kernel_shape.span(); - let kernel_size = prod(kernel_shape, 0); + let kernel_size = prod(kernel_shape); let mut num_output_channels = *(*W).shape.at(1) * group; let mut kernel_dim = (num_output_channels / group) * kernel_size; let C = *(*X).shape.at(1); let m = kernel_dim; - let n = prod((*X).shape, 2); + let n = prod_on_subset((*X).shape, 2); let k = C / group; let mut final: Array = array![]; @@ -462,192 +460,3 @@ fn conv_transpose< TensorTrait::new(shape.span(), final.span()) } -fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { - assert((*self).shape.len() == 2, 'Expected a 2D tensor'); - - let row_length = *self.shape[1]; - let start = row * row_length; - - (*self).data.slice(start, row_length) -} - -fn col2im_naive_implementation< - T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, ->( - data: @Tensor, - image_shape: Span, - kernel_shape: Span, - dilations: Span, - pads: Span, - strides: Span, -) -> NullableVec 
{ - let n_dims = pads.len() / 2; - - col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - - let mut dim_col: Array = array![]; - let mut i = 0; - while i != n_dims { - dim_col - .append( - (*image_shape.at(i) - + (*pads.at(i) + *pads.at(i + n_dims)) - - (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1)) - / *strides.at(i) - + 1 - ); - - i += 1; - }; - - let dim_col = dim_col.span(); - - let stride_img = stride(image_shape); - - let mut data_im = NullableVecImpl::new(); - data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero()); - - let kernel_size = prod(kernel_shape, 0); - let col_size = prod(dim_col, 0); - let mut c_col = 0; - while c_col != kernel_size { - let offset = get_indices(c_col, kernel_shape).span(); - - let mut col = 0; - while col != col_size { - let ind_col = get_indices(col, dim_col).span(); - let mut ind_im: Array = array![]; - let mut i = 0; - while i != n_dims { - if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads - .at(i) { - let neg_index = *pads.at(i) - - (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)); - ind_im.append(*image_shape.at(i) + neg_index); - } else { - ind_im - .append( - *ind_col.at(i) * *strides.at(i) - + *offset.at(i) * *dilations.at(i) - - *pads.at(i) - ); - } - - i += 1; - }; - - let ind_im = ind_im.span(); - if !is_out(ind_im, image_shape) { - let mut index = 0; - let mut i = 0; - while i != image_shape.len() { - index += *stride_img.at(i) * *ind_im.at(i); - i += 1; - }; - - data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); - } - - col += 1; - }; - - c_col += 1; - }; - - data_im -} - -fn col2im_shape_check, +Copy, +Drop,>( - X: @Tensor, - output_shape: Span, - kernel_shape: Span, - dilations: Span, - pads: Span, - strides: Span, -) { - let n_input_plane = *(*X).shape.at(0); - - let kernel_size = prod(kernel_shape, 0); - - assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); - - let 
input_length = *(*X).shape.at(1); - let n_dims = output_shape.len(); - let mut n_blocks: Array = array![]; - - let mut i = 0; - while i != n_dims { - n_blocks - .append( - (*output_shape.at(i) - + (*pads.at(i) + *pads.at(i + n_dims)) - - *dilations.at(i) * (*kernel_shape.at(i) - 1) - - 1) - / *strides.at(i) - + 1 - ); - i += 1; - }; - - let block_size = prod(n_blocks.span(), 0); - - assert(input_length == block_size, 'input_length != block_size'); -} - - -fn get_indices(index: usize, shape: Span,) -> Array { - let mut i = index; - let mut res: Array = array![]; - let mut k = shape.len() - 1; - while k != 0 { - let m = i % *shape.at(k); - res.append(m); - i -= m; - i /= *shape.at(k); - k -= 1; - }; - - let mut new_res: Array = array![]; - new_res.append(i); - let mut i = shape.len() - 1; - while i != 0 { - new_res.append(*res.at(i - 1)); - i -= 1; - }; - - new_res -} - -fn is_out(ind: Span, shape: Span,) -> bool { - let mut n = 0; - let is_out = loop { - if n == ind.len() { - break false; - } - let s = *shape.at(n); - let i = *ind.at(n); - if i < 0 { - break true; - } - if i >= s { - break true; - } - n += 1; - }; - - is_out -} - -fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize -) -> T { - let mut i = start; - let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); - i += 1; - }; - - prod -} - diff --git a/src/operators/nn/functional/grid_sample.cairo b/src/operators/nn/functional/grid_sample.cairo index aed560e37..a652687fa 100644 --- a/src/operators/nn/functional/grid_sample.cairo +++ b/src/operators/nn/functional/grid_sample.cairo @@ -1,24 +1,10 @@ -use core::debug::PrintTrait; - use orion::numbers::FP16x16; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{stride}; use orion::operators::tensor::{FP16x16Tensor, TensorTrait, Tensor, U32Tensor,}; use orion::operators::vec::{NullableVec, NullableVecImpl}; - -#[derive(Copy, Drop)] -enum MODE { - NEAREST, - LINEAR, - CUBIC, -} - 
-#[derive(Copy, Drop)] -enum PADDING_MODE { - ZEROS, - BORDER, - REFLECTION, -} +use orion::operators::nn::{MODE, PADDING_MODE}; +use orion::operators::nn::helpers::{dot, get_all_coord, prod, zeros, rint, reverse}; fn grid_sample< T, @@ -32,7 +18,6 @@ fn grid_sample< +Sub, +Div, +AddEq, - +PrintTrait, +PartialOrd, +PartialEq, +TryInto, @@ -79,7 +64,7 @@ fn grid_sample< y_dims.append_span(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); let y_dims = y_dims.span(); - if prod(y_dims, 0) == 0 { + if prod(y_dims) == 0 { return TensorTrait::new(array![].span(), array![].span()); } @@ -96,12 +81,12 @@ fn grid_sample< (*X).data, n * *x_stride.at(0) + c * *x_stride.at(1), *x_stride.at(1) ); let X_data_stride = SpanTrait::slice(x_stride, 2, grid_stride.len() - 2); - let all_coords = get_all_coords(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); + let all_coords = get_all_coord(SpanTrait::slice(grid_dims, 1, grid_dims.len() - 2)); let mut ix = 0; while ix != all_coords.len() { let ox = *all_coords.at(ix); - let nx = get_sub(grid_data, grid_data_stride, ox); + let nx = get_grid_data_subset(grid_data, grid_data_stride, ox); let nx = reverse(nx); let x = gs_denormalize_coordinates(nx, dims, align_corner); @@ -187,7 +172,6 @@ fn gs_cubic_interpolation_1d_with_x< +PartialOrd, +PartialEq, +Rem, - +PrintTrait, >( data: Span, x: T, border: Span, padding_mode: PADDING_MODE ) -> T { @@ -266,7 +250,6 @@ fn gs_cubic_interpolation_nd_with_x< +PartialOrd, +PartialEq, +Rem, - +PrintTrait, >( data: Span, data_dims: Span, @@ -338,7 +321,7 @@ fn gs_linear_interpolation_1d_with_x< +PartialOrd, +PartialEq, +Rem, - +PrintTrait + +AddEq, >( data: Span, x: T, border: Span, padding_mode: PADDING_MODE ) -> T { @@ -355,21 +338,6 @@ fn gs_linear_interpolation_1d_with_x< dot(coeffs, v) } -fn dot, +Copy, +NumberTrait, +Add, +TensorTrait, +Mul,>( - a: Span, b: Span -) -> T { - assert(a.len() == b.len(), 'dot: wrong len'); - - let mut i = 0; - let mut sum = NumberTrait::zero(); - while i != 
a.len() { - sum = sum + *a.at(i) * *b.at(i); - i += 1; - }; - - sum -} - fn gs_linear_interpolation_nd_with_x< T, MAG, @@ -386,7 +354,7 @@ fn gs_linear_interpolation_nd_with_x< +PartialOrd, +PartialEq, +Rem, - +PrintTrait + +AddEq >( data: Span, data_dims: Span, @@ -450,7 +418,6 @@ fn pixel_at_ndarray< +PartialOrd, +PartialEq, +Rem, - +PrintTrait, >( ndarray: Span, ndarray_dims: Span, @@ -525,7 +492,6 @@ fn pixel_at_array< +PartialOrd, +PartialEq, +Rem, - +PrintTrait, >( array: Span, i: T, border: Span, padding_mode: PADDING_MODE ) -> T { @@ -555,68 +521,6 @@ fn pixel_at_array< pixel } -fn zeros, +Copy, +NumberTrait>(n: usize) -> Span { - let mut zeros: Array = array![]; - let mut i = 0; - while i != n { - zeros.append(NumberTrait::zero()); - i += 1; - }; - - zeros.span() -} - -fn rint< - T, - MAG, - +Drop, - +Copy, - +NumberTrait, - +SubEq, - +Rem, - +PartialEq, - +PartialOrd, - +Add, - +Sub ->( - data: Span -) -> Span { - // round to nearest if ties rounds to the nearest even value. - let mut rint: Array = array![]; - let two: T = NumberTrait::one() + NumberTrait::one(); - - let mut i = 0; - while i != data.len() { - let x = *data.at(i); - let mut round = NumberTrait::round(x); - - let diff = round - x; - if diff == NumberTrait::half() { - if round % two != NumberTrait::zero() { - round -= NumberTrait::one() - } - } - - rint.append(round); - i += 1; - }; - - rint.span() -} - -fn clamp, +Copy, +NumberTrait, +PartialOrd>( - val: T, low: T, high: T -) -> T { - if val < low { - return low; - } - - if val > high { - return high; - } - - val -} fn gs_reflect< T, @@ -631,7 +535,6 @@ fn gs_reflect< +Div, +Mul, +Rem, - +PrintTrait, >( x: T, x_min: T, x_max: T ) -> T { @@ -667,18 +570,8 @@ fn gs_reflect< fx } -fn reverse, +Drop,>(data: Span) -> Span { - let mut rev: Array = array![]; - let mut i = data.len(); - while i != 0 { - rev.append(*data.at(i - 1)); - i -= 1; - }; - - rev.span() -} -fn get_sub, +Drop,>( +fn get_grid_data_subset, +Drop,>( data: Span, stride_data: 
Span, index: Span, ) -> Span { let mut acc_indices = 0; @@ -691,18 +584,6 @@ fn get_sub, +Drop,>( SpanTrait::slice(data, acc_indices, *stride_data.at(index.len() - 1)) } -fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize -) -> T { - let mut i = start; - let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); - i += 1; - }; - - prod -} fn prepare_border< T, @@ -749,17 +630,6 @@ fn prepare_border< borders1.span() } -fn arange(start: usize, end: usize, step: usize) -> Span { - assert((end - start) % step == 0, 'incompatible step value'); - let mut arr: Array = array![]; - let mut i = start; - while i != end { - arr.append(i); - i += step; - }; - - arr.span() -} fn gs_denormalize_coordinates< T, @@ -816,103 +686,17 @@ fn gs_denormalize< x } -fn get_all_coords(shape: Span) -> Span> { - let mut all_indices = array![]; - - let mut i = 0; - while i != shape.len() { - all_indices.append(arange(0, *shape.at(i), 1)); - i += 1; - }; - - cartesian(all_indices.span()) -} - -fn cartesian(mut arrays: Span>,) -> Span> { - let mut n = 1; - let mut i = arrays.len() - 1; - loop { - n = n * (*(arrays.at(i))).len(); - if i == 0 { - break; - } - i -= 1; - }; - - let mut i = 0; - let mut size_arrays: Array = array![]; - while i != arrays.len() { - size_arrays.append((*(arrays.at(i))).len()); - i += 1; - }; - - let size_arrays = size_arrays.span(); - let mut output_arrays = ArrayTrait::>::new(); - let mut m = n; - - let mut i = 0; - while i != arrays.len() { - m = m / (*(arrays.at(i))).len(); - let mut out = repeat(*(arrays.at(i)), m); - out = repeat_2(out, size_arrays, i); - - output_arrays.append(out); - i += 1; - }; - - let output_arrays = output_arrays.span(); - - let mut i = 0; - let mut ret = array![]; - while i != n { - let mut j = 0; - let mut x = ArrayTrait::new(); - while j != arrays.len() { - x.append(*(output_arrays.at(j)).at(i)); - j += 1; - }; - - ret.append(x.span()); - i += 1; - }; - - ret.span() -} - -fn 
repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { - let mut size = array.len(); - let mut i = 0; - while i != index { - let mut j = 1; - while j != *size_array.at(index - 1 - i) { - let mut k = 0; - while k != size { - array.append(*array.at(k)); - k += 1; - }; - - j += 1; - }; - - size = size * *size_array.at(index - 1 - i); - i += 1; - }; - array -} - -fn repeat(array: Span, m: usize,) -> Array { - let mut out: Array = array![]; - let mut j = 0; - while j != array.len() { - let mut k = 0; - while k != m { - out.append(*array.at(j)); - k += 1; - }; +fn clamp, +Copy, +NumberTrait, +PartialOrd>( + val: T, low: T, high: T +) -> T { + if val < low { + return low; + } - j += 1; - }; + if val > high { + return high; + } - out + val } diff --git a/src/operators/nn/helpers.cairo b/src/operators/nn/helpers.cairo new file mode 100644 index 000000000..a2d5c0b89 --- /dev/null +++ b/src/operators/nn/helpers.cairo @@ -0,0 +1,310 @@ +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::core::{stride, unravel_index}; +use orion::operators::tensor::math::max_in_tensor::max_in_tensor; +use orion::operators::tensor::math::min_in_tensor::min_in_tensor; + + +/// Computes the Cartesian product of multiple arrays. +/// +/// # Arguments +/// * `arrays` - `Span>`, Span containing N spans of usize elements. +/// +/// # Example +// cartesian([1, 2, 3], [4, 5], [6, 7]) +/// +/// >>> [ +/// [1, 4, 6], +/// [1, 4, 7], +/// [1, 5, 6], +/// [1, 5, 7], +/// [2, 4, 6], +/// [2, 4, 7], +/// [2, 5, 6], +/// [2, 5, 7], +/// [3, 4, 6], +/// [3, 4, 7], +/// [3, 5, 6], +/// [3, 5, 7] +/// ] +/// +/// # Returns +/// * A `Span>` containing the result of the Cartesian product. 
+fn cartesian(mut arrays: Span>,) -> Span> { + let n_array = arrays.len(); + let mut res = ArrayTrait::new(); + let mut n_item = 1; + let mut size_arrays = ArrayTrait::new(); + let mut iter = arrays.clone(); + loop { + match iter.pop_front() { + Option::Some(array) => { + let dim = (*array).len(); + n_item *= dim; + size_arrays.append(dim); + }, + Option::None => { break; } + } + }; + let stride = stride(size_arrays.span()); + + let mut i = 0; + loop { + if i == n_item { + break; + } + let mut flatten_index = i; + let mut item = ArrayTrait::new(); + + let mut n = 0; + loop { + if n == n_array { + break; + } + let (n_index, rem) = DivRem::div_rem( + flatten_index, (*stride.at(n)).try_into().unwrap() + ); + flatten_index = rem; + item.append(*(*arrays.at(n)).at(n_index)); + n += 1; + }; + res.append(item.span()); + i += 1; + }; + + return res.span(); +} + + +/// Computes all coordinates given the shape of a tensor. +/// +/// # Arguments +/// * `shape` - `Span`, A span containing the shape of the tensor as usize elements. +/// +/// # Returns +/// * A span of spans representing all possible coordinates of the tensor. +fn get_all_coord(mut shape: Span) -> Span> { + let mut res = ArrayTrait::new(); + + let stride = stride(shape); + let n_item = *stride.at(0) * *shape.at(0); + let dim = shape.len(); + + let mut i = 0; + loop { + if i == n_item { + break; + } + let mut flatten_index = i; + let mut indices = ArrayTrait::new(); + + let mut n = 0; + loop { + if n == dim { + break; + } + let (n_index, rem) = DivRem::div_rem( + flatten_index, (*stride.at(n)).try_into().unwrap() + ); + flatten_index = rem; + indices.append(n_index); + n += 1; + }; + res.append(indices.span()); + i += 1; + }; + + return res.span(); +} + +/// Checks if an index is out of bounds given the shape of a tensor. +/// +/// # Arguments +/// * `ind` - `Span` - A span containing the index of the tensor as usize elements. +/// * `shape` - `Span` - A span containing the shape of the tensor as usize elements. 
+/// +/// # Returns +/// * `true` if the index is out of bounds, otherwise `false`. +fn is_out(ind: Span, shape: Span,) -> bool { + let mut n = 0; + let is_out = loop { + if n == ind.len() { + break false; + } + let s = *shape.at(n); + let i = *ind.at(n); + if i < 0 { + break true; + } + if i >= s { + break true; + } + n += 1; + }; + return is_out; +} + +/// Computes the product of all the elements of the input span +/// +/// # Arguments +/// * `a` - `Span`, input span. +/// +/// # Returns +/// * `prod` - `T`, result of the product. +fn prod, +Copy, +NumberTrait, +TensorTrait, +MulEq,>( + mut a: Span +) -> T { + let mut prod = NumberTrait::one(); + loop { + match a.pop_front() { + Option::Some(v) => { prod *= *v; }, + Option::None => { break prod; } + }; + } +} + + +/// Computes the product of all the elements of the input span +/// +/// # Arguments +/// * `a` - `Span`, input span. +/// * `start` - usize. +/// +/// # Returns +/// * `prod` - `T`, result of the product. +fn prod_on_subset, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + pA: Span, start: usize +) -> T { + let mut i = start; + let mut prod = NumberTrait::one(); + while i != pA.len() { + prod = prod * (*pA.at(i)); + i += 1; + }; + + prod +} + +/// Computes the dot product of the inputs span +/// +/// # Arguments +/// * `a` - `Span`, input span. +/// * `b` - `Span`, input span. +/// +/// # Returns +/// * `acc` - `T`, result of the dot product. +fn dot, +Copy, +NumberTrait, +TensorTrait, +AddEq, +Mul,>( + a: Span, b: Span +) -> T { + let mut i = 0; + let mut acc = NumberTrait::zero(); + while i != a.len() { + acc += *a.at(i) * *b.at(i); + i += 1; + }; + + acc +} + + +/// Return evenly spaced values within a given interval. +/// Values are generated within the half-open interval [0, end) (in other words, the interval including start but excluding stop). 
+/// +/// # Arguments +/// * `start` - usize +/// * `end` - usize +/// * `step` - usize +/// +/// # Returns +//// returns a span of len ceil((end - start) / step), containing the values from `start` to the closest integer to `end` in the interval [0, end) with interval `step`. +fn arange(start: usize, end: usize, step: usize) -> Span { + let mut arr: Array = array![]; + let mut i = start; + while i < end { + arr.append(i); + i += step; + }; + + arr.span() +} + +/// Return a span containing `n` zeros of type `T` +/// +/// # Arguments +/// * `n` - usize +/// +fn zeros, +Copy, +NumberTrait>(n: usize) -> Span { + let mut zeros: Array = array![]; + let mut i = 0; + while i != n { + zeros.append(NumberTrait::zero()); + i += 1; + }; + + zeros.span() +} + + +/// Round elements of the span to the nearest integer. For values exactly halfway between rounded decimal valuesrounds to the nearest even value. +/// +/// # Arguments +/// * `data` - `Span` +/// +/// # Returns +//// a `Span` countaining the rounded values. +fn rint< + T, + MAG, + +Drop, + +Copy, + +NumberTrait, + +SubEq, + +Rem, + +PartialEq, + +PartialOrd, + +Add, + +Sub +>( + data: Span +) -> Span { + let mut rint: Array = array![]; + let two: T = NumberTrait::one() + NumberTrait::one(); + + let mut i = 0; + while i != data.len() { + let x = *data.at(i); + let mut round = NumberTrait::round(x); + + let diff = round - x; + if diff == NumberTrait::half() { + if round % two != NumberTrait::zero() { + round -= NumberTrait::one() + } + } + + rint.append(round); + i += 1; + }; + + rint.span() +} + + +/// Reverse the span input +/// +/// # Arguments +/// * `data` - `Span` +/// +/// # Returns +//// a `Span` countaining the reversed values. 
+fn reverse, +Drop,>(data: Span) -> Span { + let mut rev: Array = array![]; + let mut i = data.len(); + while i != 0 { + rev.append(*data.at(i - 1)); + i -= 1; + }; + + rev.span() +} + diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 1c018ade3..a41801667 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -11,6 +11,7 @@ use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ use orion::operators::tensor::implementations::tensor_fp16x16wide::{ FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd }; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl FP16x16NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -85,8 +86,8 @@ impl FP16x16NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index a5725eccb..4c9d4f4f8 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -5,6 +5,7 @@ use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x use orion::operators::tensor::implementations::tensor_fp32x32::{ FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd }; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl FP32x32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -79,8 +80,8 @@ impl FP32x32NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo 
b/src/operators/nn/implementations/nn_fp64x64.cairo index 01a3b30ad..887efa246 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -5,6 +5,7 @@ use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x use orion::operators::tensor::implementations::tensor_fp64x64::{ FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd }; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl FP64x64NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -79,8 +80,8 @@ impl FP64x64NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index d80d2c323..ec3085c74 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -9,6 +9,7 @@ use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, FP8x23IntoFP8x23W }; use orion::operators::tensor::implementations::tensor_fp8x23wide::{FP8x23WTensor}; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl FP8x23NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -81,8 +82,8 @@ impl FP8x23NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { functional::grid_sample::grid_sample(X, grid, align_corner, mode, padding_mode) } @@ -102,7 +103,7 @@ impl FP8x23NN of NNTrait { X: @Tensor, W: @Tensor, B: Option<@Tensor>, - auto_pad: Option, + auto_pad: Option, dilations: Option>, group: Option, kernel_shape: Option>, @@ -130,7 +131,7 @@ impl FP8x23NN of NNTrait { X: @Tensor, W: @Tensor, B: Option>, - auto_pad: Option, + auto_pad: 
Option, dilations: Option>, group: Option, kernel_shape: Option>, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index 29a94d288..6a0b579f4 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -2,6 +2,7 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd}; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl I32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -72,8 +73,8 @@ impl I32NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e22de6b43..29a52e379 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -2,6 +2,7 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl I8NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -72,8 +73,8 @@ impl I8NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 7352b7ad9..c31b59955 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -2,6 +2,7 @@ use orion::operators::tensor::core::Tensor; use 
orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; +use orion::operators::nn::{AUTO_PAD, MODE, PADDING_MODE}; impl U32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -72,8 +73,8 @@ impl U32NN of NNTrait { X: @Tensor, grid: @Tensor, align_corner: Option, - mode: Option, - padding_mode: Option, + mode: Option, + padding_mode: Option, ) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/math/resize.cairo b/src/operators/tensor/math/resize.cairo index ab0ef86f7..18917454e 100644 --- a/src/operators/tensor/math/resize.cairo +++ b/src/operators/tensor/math/resize.cairo @@ -5,6 +5,7 @@ use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; +use orion::operators::nn::helpers::{get_all_coord}; #[derive(Copy, Drop)] enum MODE { @@ -434,12 +435,14 @@ fn interpolate_nd< i += 1; }; - let mut ret = cartesian(ret.span()); + //let mut ret = cartesian(ret.span()); + let mut ret = get_all_coord(output_size); let mut ret_data = array![]; loop { match ret.pop_front() { Option::Some(X) => { + let X = *X; let mut x: Array = array![]; let mut i = 0; while i != X.len() { @@ -476,94 +479,6 @@ fn interpolate_nd< TensorTrait::new(output_size, ret_data.span()) } -fn cartesian(mut arrays: Span>,) -> Array> { - let mut n = 1; - let mut i = arrays.len() - 1; - loop { - n = n * (*(arrays.at(i))).len(); - if i == 0 { - break; - } - i -= 1; - }; - - let mut i = 0; - let mut size_arrays = array![]; - while i != arrays.len() { - size_arrays.append((*(arrays.at(i))).len()); - i += 1; - }; - - let size_arrays = size_arrays.span(); - let mut output_arrays = array![]; - let mut m = n; - - let mut i = 0; - while i != arrays.len() { - m = m / (*(arrays.at(i))).len(); - let mut out = repeat(*(arrays.at(i)), m); - out = 
repeat_2(out, size_arrays, i); - - output_arrays.append(out); - i += 1; - }; - - let output_arrays = output_arrays.span(); - - let mut i = 0; - let mut ret = array![]; - while i != n { - let mut j = 0; - let mut x = array![]; - while j != arrays.len() { - x.append(*(output_arrays.at(j)).at(i)); - j += 1; - }; - - ret.append(x); - i += 1; - }; - - ret -} - -fn repeat_2(mut array: Array, size_array: Span, index: usize) -> Array { - let mut size = array.len(); - let mut i = 0; - while i != index { - let mut j = 1; - while j != *size_array.at(index - 1 - i) { - let mut k = 0; - while k != size { - array.append(*array.at(k)); - k += 1; - }; - - j += 1; - }; - - size = size * *size_array.at(index - 1 - i); - i += 1; - }; - - array -} - -fn repeat(array: Span, m: usize,) -> Array { - let mut out = array![]; - let mut j = 0; - while j != array.len() { - let mut k = 0; - while k != m { - out.append(*array.at(j)); - k += 1; - }; - - j += 1; - }; - - out -} fn interpolate_nd_with_x< T, diff --git a/tests/nodes/conv_2D_with_autopad_same.cairo b/tests/nodes/conv_2D_with_autopad_same.cairo index b3c88bdf4..aa0816386 100644 --- a/tests/nodes/conv_2D_with_autopad_same.cairo +++ b/tests/nodes/conv_2D_with_autopad_same.cairo @@ -9,7 +9,7 @@ use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::nn::FP16x16NN; -use orion::operators::nn::functional::conv::AUTO_PAD; +use orion::operators::nn::AUTO_PAD; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/grid_sample_cubic.cairo b/tests/nodes/grid_sample_cubic.cairo index fc790006b..4fa55b3df 100644 --- a/tests/nodes/grid_sample_cubic.cairo +++ b/tests/nodes/grid_sample_cubic.cairo @@ -8,7 +8,7 @@ use orion::numbers::FixedTrait; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::NNTrait; -use orion::operators::nn::functional::grid_sample::MODE; +use orion::operators::nn::{MODE}; #[test] 
#[available_gas(2000000000)] diff --git a/tests/nodes/grid_sample_nearest.cairo b/tests/nodes/grid_sample_nearest.cairo index 3a5dc4a07..8915f3468 100644 --- a/tests/nodes/grid_sample_nearest.cairo +++ b/tests/nodes/grid_sample_nearest.cairo @@ -8,7 +8,7 @@ use orion::numbers::FixedTrait; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::NNTrait; -use orion::operators::nn::functional::grid_sample::MODE; +use orion::operators::nn::{MODE}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/grid_sample_nearest_aligncorner.cairo b/tests/nodes/grid_sample_nearest_aligncorner.cairo index 6e24295b3..7731f867a 100644 --- a/tests/nodes/grid_sample_nearest_aligncorner.cairo +++ b/tests/nodes/grid_sample_nearest_aligncorner.cairo @@ -8,7 +8,7 @@ use orion::numbers::FixedTrait; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::NNTrait; -use orion::operators::nn::functional::grid_sample::MODE; +use orion::operators::nn::{MODE}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/grid_sample_padding_border.cairo b/tests/nodes/grid_sample_padding_border.cairo index 135b43eef..bff7b1461 100644 --- a/tests/nodes/grid_sample_padding_border.cairo +++ b/tests/nodes/grid_sample_padding_border.cairo @@ -8,7 +8,7 @@ use orion::numbers::FixedTrait; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::NNTrait; -use orion::operators::nn::functional::grid_sample::PADDING_MODE; +use orion::operators::nn::{PADDING_MODE}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/grid_sample_padding_reflection.cairo b/tests/nodes/grid_sample_padding_reflection.cairo index 54590e0ba..561780a87 100644 --- a/tests/nodes/grid_sample_padding_reflection.cairo +++ b/tests/nodes/grid_sample_padding_reflection.cairo @@ -8,7 +8,7 @@ use 
orion::numbers::FixedTrait; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::NNTrait; -use orion::operators::nn::functional::grid_sample::PADDING_MODE; +use orion::operators::nn::{PADDING_MODE}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/grid_sample_padding_zeros.cairo b/tests/nodes/grid_sample_padding_zeros.cairo index b7ff7c6b3..d3c7db69c 100644 --- a/tests/nodes/grid_sample_padding_zeros.cairo +++ b/tests/nodes/grid_sample_padding_zeros.cairo @@ -8,7 +8,7 @@ use orion::numbers::FixedTrait; use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::NNTrait; -use orion::operators::nn::functional::grid_sample::PADDING_MODE; +use orion::operators::nn::{PADDING_MODE}; #[test] #[available_gas(2000000000)] From 1b8b74a859746e0658822ef40a53a007430e8a01 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Thu, 7 Mar 2024 10:24:05 +0100 Subject: [PATCH 11/68] fix: small fix --- src/operators/nn/helpers.cairo | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/operators/nn/helpers.cairo b/src/operators/nn/helpers.cairo index a2d5c0b89..92d7656c9 100644 --- a/src/operators/nn/helpers.cairo +++ b/src/operators/nn/helpers.cairo @@ -173,12 +173,12 @@ fn prod, +Copy, +NumberTrait, +TensorTrait, +MulE /// # Returns /// * `prod` - `T`, result of the product. 
fn prod_on_subset, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize + a: Span, start: usize ) -> T { let mut i = start; let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); + while i != a.len() { + prod = prod * (*a.at(i)); i += 1; }; From 493196b628fb73f8171821e9a7150ed78d7babdd Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Wed, 20 Mar 2024 17:50:41 +0100 Subject: [PATCH 12/68] refactor reduce_sum and create a reduce_sum_single_axis variant --- .tool-versions | 2 +- src/operators/nn/functional/logsoftmax.cairo | 4 +- src/operators/nn/functional/softmax.cairo | 4 +- src/operators/tensor/core.cairo | 45 +++++++++++++- .../tensor/implementations/tensor_bool.cairo | 22 ++++--- .../implementations/tensor_complex64.cairo | 31 +++++++--- .../implementations/tensor_fp16x16.cairo | 29 ++++++--- .../implementations/tensor_fp16x16wide.cairo | 29 ++++++--- .../implementations/tensor_fp32x32.cairo | 30 +++++++--- .../implementations/tensor_fp64x64.cairo | 29 ++++++--- .../implementations/tensor_fp8x23.cairo | 28 ++++++--- .../implementations/tensor_fp8x23wide.cairo | 31 +++++++--- .../tensor/implementations/tensor_i32.cairo | 23 +++++--- .../tensor/implementations/tensor_i8.cairo | 26 +++++--- .../tensor/implementations/tensor_u32.cairo | 24 +++++--- .../tensor/math/layer_normalization.cairo | 6 +- src/operators/tensor/math/reduce_l1.cairo | 3 +- src/operators/tensor/math/reduce_l2.cairo | 9 ++- .../tensor/math/reduce_log_sum.cairo | 3 +- src/operators/tensor/math/reduce_sum.cairo | 59 ++++++++++++++++++- .../tensor/math/reduce_sum_square.cairo | 3 +- tests/lib.cairo | 12 ++-- 22 files changed, 345 insertions(+), 107 deletions(-) diff --git a/.tool-versions b/.tool-versions index ebe254233..e1290964b 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1 +1 @@ -scarb 2.5.3 +scarb 2.6.4 diff --git a/src/operators/nn/functional/logsoftmax.cairo b/src/operators/nn/functional/logsoftmax.cairo index fdf89c43d..33df374f0 
100644 --- a/src/operators/nn/functional/logsoftmax.cairo +++ b/src/operators/nn/functional/logsoftmax.cairo @@ -10,7 +10,7 @@ fn logsoftmax< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor = z.exp(); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor.reduce_sum_single_axis(axis, true); let softmax = exp_tensor / sum; let logsoftmax = softmax.log(); @@ -38,7 +38,7 @@ fn logsoftmaxWide< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor.reduce_sum_single_axis(axis, true); let softmax = div_downcast(@exp_tensor, @sum); softmax.log() diff --git a/src/operators/nn/functional/softmax.cairo b/src/operators/nn/functional/softmax.cairo index 10602bde7..e8be7953d 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -13,7 +13,7 @@ fn softmax< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor = z.exp(); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor.reduce_sum_single_axis(axis, true); exp_tensor / sum } @@ -39,7 +39,7 @@ fn softmaxWide< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor.reduce_sum_single_axis(axis, true); div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 0d21a4de3..2db4cae78 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -675,7 +675,50 @@ trait TensorTrait { /// >>> [[4,6],[8,10]] /// ``` /// - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + fn reduce_sum( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + ) -> Tensor; + /// ## tensor.reduce_sum + /// + /// ```rust + /// fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// ``` + /// + /// Reduces a tensor by summing its elements along a 
specified axis. + /// + /// ## Args + /// + /// * `self`(`@Tensor`) - The input tensor. + /// * `axis`(`usize`) - The dimension to reduce. + /// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. + /// + /// ## Panics + /// + /// * Panics if axis is not in the range of the input tensor's dimensions. + /// + /// ## Returns + /// + /// A new `Tensor` instance with the specified axis reduced by summing its elements. + /// + /// ## Examples + /// + /// ```rust + /// use core::array::{ArrayTrait, SpanTrait}; + /// + /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + /// + /// fn reduce_sum_example() -> Tensor { + /// let tensor = TensorTrait::::new( + /// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), + /// ); + /// + /// // We can call `reduce_sum` function as follows. + /// return tensor.reduce_sum(axis: 0, keepdims: false); + /// } + /// >>> [[4,6],[8,10]] + /// ``` + /// + fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// # tensor.argmax /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 612a397cc..e8f1750a1 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -64,7 +64,13 @@ impl BoolTensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + fn reduce_sum( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + ) -> Tensor { + panic(array!['not supported!']) + } + + fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { panic(array!['not supported!']) } @@ -570,17 +576,19 @@ impl BoolTryIntobool of TryInto { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == 
rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index c9c31ae23..2b9bc8e53 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -73,10 +73,23 @@ impl Complex64Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: bool, + noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + } + + fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -668,17 +681,19 @@ fn eq(lhs: @complex64, rhs: @complex64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 
0 + && is_eq { + is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index a37ed0442..aa5c28e38 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -75,8 +75,19 @@ impl FP16x16Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: bool, + noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -760,17 +771,19 @@ fn relative_eq(lhs: @FP16x16, rhs: @FP16x16) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 2003b28ff..acda2e64b 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ 
b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -79,8 +79,19 @@ impl FP16x16WTensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: bool, + noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -719,17 +730,19 @@ fn relative_eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4870226a1..2618bfe88 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -72,8 +72,20 @@ impl FP32x32Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: bool, + noop_with_empty_axes: bool + 
) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -766,17 +778,19 @@ fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 3a7214d18..e99f5647c 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -72,8 +72,19 @@ impl FP64x64Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: bool, + noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -766,17 +777,19 @@ fn relative_eq(lhs: @FP64x64, 
rhs: @FP64x64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.shape.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index b4a26d749..61beb1cae 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -72,10 +72,20 @@ impl FP8x23Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } + fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -777,17 +787,19 @@ fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == 
rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 06a297b69..76657ff4d 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -75,10 +75,23 @@ impl FP8x23WTensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: bool, + noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } + fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -720,17 +733,19 @@ fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), 
rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 296876516..8c2ef8cb5 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -72,10 +72,15 @@ impl I32Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } + fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) @@ -711,17 +716,19 @@ impl I32TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 42d807c68..2fdf7cdf2 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -70,8 +70,16 @@ impl I8Tensor of TensorTrait { reshape(self, 
target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + fn reduce_sum_single_axis( + self: @Tensor, axis: usize, keepdims: bool + ) -> Tensor { + math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -702,17 +710,19 @@ impl I8TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() == 0 && !is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() == 0 + && !is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index efb681a86..5cb4505cd 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -69,8 +69,14 @@ impl U32Tensor of TensorTrait { reshape(self, target_shape) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + } + + fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + 
math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -656,17 +662,19 @@ impl U32TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index e61e826f5..5756c2296 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -4,6 +4,7 @@ use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; +use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; /// Cf: TensorTrait::layer_normalization docstring fn layer_normalization< @@ -12,6 +13,7 @@ fn layer_normalization< +TensorTrait, +NumberTrait, +PartialEq, + +AddEq, +Copy, +Drop, +Div>, @@ -90,13 +92,13 @@ fn layer_normalization< one_tensor.append(NumberTrait::one()); let x_mat = self.reshape(shape_matrix.span()); - let x_mean = x_mat.reduce_sum(1, true) + let x_mean = reduce_sum_single_axis(@x_mat, 1, true) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); let x_diff = x_mat - x_mean; let x_squared_diff = x_diff * x_diff; - let variance = x_squared_diff.reduce_sum(1, true) + let variance = reduce_sum_single_axis(@x_squared_diff, 1, 
true) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); let variance_eps = variance + TensorTrait::new(shape_one.span(), epsilon_tensor.span()); diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo index ba2be9215..329b67494 100644 --- a/src/operators/tensor/math/reduce_l1.cairo +++ b/src/operators/tensor/math/reduce_l1.cairo @@ -1,6 +1,7 @@ use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; +use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; /// Cf: TensorTrait::reduce_sum docstring fn reduce_l1< @@ -16,5 +17,5 @@ fn reduce_l1< ) -> Tensor { let data_abs = self.abs(); - data_abs.reduce_sum(axis: axis, keepdims: keepdims) + reduce_sum_single_axis(@data_abs, axis: axis, keepdims: keepdims) } diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index 96f4b7245..1fc798096 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -3,6 +3,7 @@ use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; +use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; fn square< T, @@ -40,13 +41,14 @@ fn reduce_l2< impl TTensor: TensorTrait, impl TNumber: NumberTrait, impl TMul: Mul, + impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_square_sum = reduce_sum_single_axis(@tensor_square, axis: axis, keepdims: keepdims); tensor_square_sum.sqrt() } @@ -57,6 +59,7 @@ fn reduce_l2_complex< impl TTensor: TensorTrait, impl TNumber: NumberTrait, impl 
TMul: Mul, + impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, impl TPrint: PrintTrait @@ -64,7 +67,9 @@ fn reduce_l2_complex< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let mut tensor_square = square(@self.abs()); - let mut tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); + let mut tensor_square_sum = reduce_sum_single_axis( + @tensor_square, axis: axis, keepdims: keepdims + ); tensor_square_sum.sqrt() } diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo index 60a5225cb..2149b72b9 100644 --- a/src/operators/tensor/math/reduce_log_sum.cairo +++ b/src/operators/tensor/math/reduce_log_sum.cairo @@ -1,6 +1,7 @@ use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; +use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; /// Cf: TensorTrait::reduce_sum_square docstring fn reduce_log_sum< @@ -15,7 +16,7 @@ fn reduce_log_sum< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let tensor_square_sum = self.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_square_sum = reduce_sum_single_axis(self, axis: axis, keepdims: keepdims); let tensor_square_sum_log = tensor_square_sum.log(); tensor_square_sum_log diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index 078345f4a..c5c0293d7 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,8 +1,10 @@ +use core::array::SpanTrait; +use core::option::OptionTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; -/// Cf: TensorTrait::reduce_sum docstring + fn reduce_sum< T, MAG, @@ -11,6 +13,60 @@ fn reduce_sum< 
impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop +>( + self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool +) -> Tensor { + // Handle case when no reduction is needed + if noop_with_empty_axes && (axes.is_none() || axes.unwrap().is_empty()) { + return *self; + } + + let reducer_len = if let Option::Some(axes) = axes { + axes.len() + } else { + (*self.shape).len() + }; + let mut result_tensor = *self; + let mut axis_index = 0; + while axis_index < reducer_len { + let axis = if let Option::Some(axes) = axes { + *axes.at(axis_index) + } else { + axis_index + }; + + result_tensor = + { + let mut output_data: Array = array![]; + let output_shape = reduce_output_shape(result_tensor.shape, axis, keepdims); + let output_data_len = len_from_shape(output_shape); + let mut index: usize = 0; + + while index != output_data_len { + let output_indices = unravel_index(index, output_shape); + let current_sum = accumulate_sum::< + T + >(result_tensor.data, result_tensor.shape, output_indices, axis); + output_data.append(current_sum); + index += 1; + }; + TensorTrait::::new(output_shape, output_data.span()) + }; + + axis_index += 1; + }; + + result_tensor +} + +fn reduce_sum_single_axis< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { @@ -101,3 +157,4 @@ fn accumulate_sum< return acc; } + diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index b8ad7df99..3f4d2e6c4 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -1,6 +1,7 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; +use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; fn square< T, @@ 
-45,7 +46,7 @@ fn reduce_sum_square< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_square_sum = reduce_sum_single_axis(@tensor_square, axis: axis, keepdims: keepdims); tensor_square_sum } diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..eb58139db 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -mod numbers; -mod performance; -mod tensor_core; -mod nodes; -mod ml; -mod operators; +// mod numbers; +// mod performance; +// mod tensor_core; +// mod nodes; +// mod ml; +// mod operators; From 1f87c860d981a07c7a48054bae55987957eaca1d Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Wed, 20 Mar 2024 21:07:02 +0100 Subject: [PATCH 13/68] add reduce_sum_single_axis tests --- nodegen/file_manager.py | 35 +++-- ...educe_sum.py => reduce_sum_single_axis.py} | 132 +++++++++--------- tests/lib.cairo | 12 +- tests/nodes.cairo | 40 +++--- .../reduce_sum_single_axis_fp16x16_1D.cairo | 20 +++ .../input_0.cairo | 15 ++ .../output_0.cairo | 13 ++ ...ce_sum_single_axis_fp16x16_2D_axis_1.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...e_sum_single_axis_fp16x16_2D_default.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ..._sum_single_axis_fp16x16_2D_keepdims.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 15 ++ .../reduce_sum_single_axis_fp8x23_1D.cairo | 20 +++ .../input_0.cairo | 15 ++ .../output_0.cairo | 13 ++ ...uce_sum_single_axis_fp8x23_2D_axis_1.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...ce_sum_single_axis_fp8x23_2D_default.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...e_sum_single_axis_fp8x23_2D_keepdims.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 15 ++ .../nodes/reduce_sum_single_axis_i32_1D.cairo | 20 +++ .../input_0.cairo | 15 ++ .../output_0.cairo | 13 ++ 
...reduce_sum_single_axis_i32_2D_axis_1.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...educe_sum_single_axis_i32_2D_default.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...duce_sum_single_axis_i32_2D_keepdims.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 15 ++ .../nodes/reduce_sum_single_axis_i8_1D.cairo | 20 +++ .../input_0.cairo | 15 ++ .../output_0.cairo | 13 ++ .../reduce_sum_single_axis_i8_2D_axis_1.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...reduce_sum_single_axis_i8_2D_default.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...educe_sum_single_axis_i8_2D_keepdims.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 15 ++ .../nodes/reduce_sum_single_axis_u32_1D.cairo | 20 +++ .../input_0.cairo | 15 ++ .../output_0.cairo | 13 ++ ...reduce_sum_single_axis_u32_2D_axis_1.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...educe_sum_single_axis_u32_2D_default.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 14 ++ ...duce_sum_single_axis_u32_2D_keepdims.cairo | 20 +++ .../input_0.cairo | 17 +++ .../output_0.cairo | 15 ++ 64 files changed, 1125 insertions(+), 104 deletions(-) rename nodegen/node/{reduce_sum.py => reduce_sum_single_axis.py} (65%) create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo create mode 100644 
tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_1D.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo create mode 100644 
tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_1D.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_1D.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo create mode 100644 
tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo diff --git a/nodegen/file_manager.py b/nodegen/file_manager.py index 203b6b333..babe26399 100644 --- a/nodegen/file_manager.py +++ b/nodegen/file_manager.py @@ -91,25 +91,36 @@ def base_template( This method generates a list of strings that form the template of a Cairo test function, including module imports, function definition, and assertions. """ - return [ + template = [ *[f"mod input_{i};" for i in range(arg_cnt)], *[f"mod output_{i};" for i in range(out_cnt)], - *[""], - *[""], + "", + "", *[f"use {ref};" for ref in refs], - *[""], - *["#[test]"], - *["#[available_gas(2000000000)]"], - *[f"fn test_{name}()" + " {"], + "", + "#[test]", + "#[available_gas(2000000000)]", + f"fn test_{name}()" + " {", *[f" let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)], *[f" let z_{i} = output_{i}::output_{i}();" for i in range(out_cnt)], - *[""], - *[f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};"], - *[""], - *[f" assert_eq(y_{i}, z_{i});" for i in range(out_cnt)], - *["}"], + "" ] + # Handling conditional function signature based on the number of outputs + if out_cnt > 1: + template.append(f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};") + else: + template.append(f" let y_0 = {func_sig};") + + # Continue appending to the template + template.extend([ + "", + *[f" assert_eq(y_{i}, z_{i});" for i in range(out_cnt)], + "}" + ]) + + return template + @classmethod def sequence_template(cls, name: str, arg_cnt: int, refs: list[str], func_sig: str) -> list[str]: """ diff --git a/nodegen/node/reduce_sum.py b/nodegen/node/reduce_sum_single_axis.py similarity index 65% rename from nodegen/node/reduce_sum.py rename to nodegen/node/reduce_sum_single_axis.py index 111724001..91197c45d 100644 --- 
a/nodegen/node/reduce_sum.py +++ b/nodegen/node/reduce_sum_single_axis.py @@ -3,21 +3,21 @@ from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl -class Reduce_sum(RunAll): +class Reduce_sum_single_axis(RunAll): @staticmethod - def reduce_sum_u32(): - def reduce_sum_1D(): + def reduce_sum_single_axis_u32(): + def reduce_sum_single_axis_1D(): x = np.array([0, 1, 2,]).astype(np.uint32) y = np.array([3]).astype(np.uint32) x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reduce_sum_u32_1D" + name = "reduce_sum_single_axis_u32_1D" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - def reduce_sum_2D(): + def reduce_sum_single_axis_2D(): def default(): x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) y = np.array([2, 4]).astype(np.uint32) @@ -25,9 +25,9 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reduce_sum_u32_2D_default" + name = "reduce_sum_single_axis_u32_2D_default" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) def keepdims(): x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) @@ -36,9 +36,9 @@ def keepdims(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reduce_sum_u32_2D_keepdims" + name = "reduce_sum_single_axis_u32_2D_keepdims" make_test( - [x], y, "input_0.reduce_sum(0, true)", name) + [x], y, "input_0.reduce_sum_single_axis(0, true)", name) def axis_1(): x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) @@ -47,30 +47,30 @@ def axis_1(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reduce_sum_u32_2D_axis_1" + name = "reduce_sum_single_axis_u32_2D_axis_1" make_test( - [x], y, "input_0.reduce_sum(1, false)", name) + [x], y, "input_0.reduce_sum_single_axis(1, 
false)", name) default() keepdims() axis_1() - reduce_sum_1D() - reduce_sum_2D() + reduce_sum_single_axis_1D() + reduce_sum_single_axis_2D() @staticmethod - def reduce_sum_i32(): - def reduce_sum_1D(): + def reduce_sum_single_axis_i32(): + def reduce_sum_single_axis_1D(): x = np.array([0, 1, 2,]).astype(np.int32) y = np.array([3]).astype(np.int32) x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "reduce_sum_i32_1D" + name = "reduce_sum_single_axis_i32_1D" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - def reduce_sum_2D(): + def reduce_sum_single_axis_2D(): def default(): x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) y = np.array([2, 4]).astype(np.int32) @@ -78,9 +78,9 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "reduce_sum_i32_2D_default" + name = "reduce_sum_single_axis_i32_2D_default" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) def keepdims(): x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) @@ -89,9 +89,9 @@ def keepdims(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "reduce_sum_i32_2D_keepdims" + name = "reduce_sum_single_axis_i32_2D_keepdims" make_test( - [x], y, "input_0.reduce_sum(0, true)", name) + [x], y, "input_0.reduce_sum_single_axis(0, true)", name) def axis_1(): x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) @@ -100,30 +100,30 @@ def axis_1(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - name = "reduce_sum_i32_2D_axis_1" + name = "reduce_sum_single_axis_i32_2D_axis_1" make_test( - [x], y, "input_0.reduce_sum(1, false)", name) + [x], y, "input_0.reduce_sum_single_axis(1, false)", name) default() keepdims() axis_1() - reduce_sum_1D() - reduce_sum_2D() + 
reduce_sum_single_axis_1D() + reduce_sum_single_axis_2D() @staticmethod - def reduce_sum_i8(): - def reduce_sum_1D(): + def reduce_sum_single_axis_i8(): + def reduce_sum_single_axis_1D(): x = np.array([0, 1, 2,]).astype(np.int8) y = np.array([3]).astype(np.int8) x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - name = "reduce_sum_i8_1D" + name = "reduce_sum_single_axis_i8_1D" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - def reduce_sum_2D(): + def reduce_sum_single_axis_2D(): def default(): x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) y = np.array([2, 4]).astype(np.int8) @@ -131,9 +131,9 @@ def default(): x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - name = "reduce_sum_i8_2D_default" + name = "reduce_sum_single_axis_i8_2D_default" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) def keepdims(): x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) @@ -142,9 +142,9 @@ def keepdims(): x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - name = "reduce_sum_i8_2D_keepdims" + name = "reduce_sum_single_axis_i8_2D_keepdims" make_test( - [x], y, "input_0.reduce_sum(0, true)", name) + [x], y, "input_0.reduce_sum_single_axis(0, true)", name) def axis_1(): x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) @@ -153,19 +153,19 @@ def axis_1(): x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - name = "reduce_sum_i8_2D_axis_1" + name = "reduce_sum_single_axis_i8_2D_axis_1" make_test( - [x], y, "input_0.reduce_sum(1, false)", name) + [x], y, "input_0.reduce_sum_single_axis(1, false)", name) default() keepdims() axis_1() - reduce_sum_1D() - reduce_sum_2D() + reduce_sum_single_axis_1D() + reduce_sum_single_axis_2D() @staticmethod - def 
reduce_sum_fp8x23(): - def reduce_sum_1D(): + def reduce_sum_single_axis_fp8x23(): + def reduce_sum_single_axis_1D(): x = np.array([0, 1, 2,]).astype(np.int64) y = np.array([3]).astype(np.int64) @@ -174,11 +174,11 @@ def reduce_sum_1D(): y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - name = "reduce_sum_fp8x23_1D" + name = "reduce_sum_single_axis_fp8x23_1D" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - def reduce_sum_2D(): + def reduce_sum_single_axis_2D(): def default(): x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) y = np.array([2, 4]).astype(np.int64) @@ -188,9 +188,9 @@ def default(): y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - name = "reduce_sum_fp8x23_2D_default" + name = "reduce_sum_single_axis_fp8x23_2D_default" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) def keepdims(): x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) @@ -201,9 +201,9 @@ def keepdims(): y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - name = "reduce_sum_fp8x23_2D_keepdims" + name = "reduce_sum_single_axis_fp8x23_2D_keepdims" make_test( - [x], y, "input_0.reduce_sum(0, true)", name) + [x], y, "input_0.reduce_sum_single_axis(0, true)", name) def axis_1(): x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) @@ -214,20 +214,20 @@ def axis_1(): y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - name = "reduce_sum_fp8x23_2D_axis_1" + name = "reduce_sum_single_axis_fp8x23_2D_axis_1" make_test( - [x], y, "input_0.reduce_sum(1, false)", name) + [x], y, "input_0.reduce_sum_single_axis(1, false)", name) default() keepdims() axis_1() - reduce_sum_1D() - reduce_sum_2D() + reduce_sum_single_axis_1D() + reduce_sum_single_axis_2D() @staticmethod - def reduce_sum_fp16x16(): - def reduce_sum_1D(): + def 
reduce_sum_single_axis_fp16x16(): + def reduce_sum_single_axis_1D(): x = np.array([0, 1, 2,]).astype(np.int64) y = np.array([3]).astype(np.int64) @@ -236,11 +236,11 @@ def reduce_sum_1D(): y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - name = "reduce_sum_fp16x16_1D" + name = "reduce_sum_single_axis_fp16x16_1D" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - def reduce_sum_2D(): + def reduce_sum_single_axis_2D(): def default(): x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) y = np.array([2, 4]).astype(np.int64) @@ -250,9 +250,9 @@ def default(): y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - name = "reduce_sum_fp16x16_2D_default" + name = "reduce_sum_single_axis_fp16x16_2D_default" make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + [x], y, "input_0.reduce_sum_single_axis(0, false)", name) def keepdims(): x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) @@ -263,9 +263,9 @@ def keepdims(): y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - name = "reduce_sum_fp16x16_2D_keepdims" + name = "reduce_sum_single_axis_fp16x16_2D_keepdims" make_test( - [x], y, "input_0.reduce_sum(0, true)", name) + [x], y, "input_0.reduce_sum_single_axis(0, true)", name) def axis_1(): x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) @@ -276,13 +276,13 @@ def axis_1(): y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - name = "reduce_sum_fp16x16_2D_axis_1" + name = "reduce_sum_single_axis_fp16x16_2D_axis_1" make_test( - [x], y, "input_0.reduce_sum(1, false)", name) + [x], y, "input_0.reduce_sum_single_axis(1, false)", name) default() keepdims() axis_1() - reduce_sum_1D() - reduce_sum_2D() + reduce_sum_single_axis_1D() + reduce_sum_single_axis_2D() diff --git a/tests/lib.cairo b/tests/lib.cairo index eb58139db..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ 
-1,7 +1,7 @@ -// mod numbers; -// mod performance; -// mod tensor_core; -// mod nodes; -// mod ml; -// mod operators; +mod numbers; +mod performance; +mod tensor_core; +mod nodes; +mod ml; +mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 29bebb762..28948e5c5 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -278,26 +278,6 @@ mod or_i8; mod or_i8_broadcast; mod or_u32; mod or_u32_broadcast; -mod reduce_sum_fp16x16_1D; -mod reduce_sum_fp16x16_2D_default; -mod reduce_sum_fp16x16_2D_keepdims; -mod reduce_sum_fp16x16_2D_axis_1; -mod reduce_sum_fp8x23_1D; -mod reduce_sum_fp8x23_2D_default; -mod reduce_sum_fp8x23_2D_keepdims; -mod reduce_sum_fp8x23_2D_axis_1; -mod reduce_sum_i32_1D; -mod reduce_sum_i32_2D_default; -mod reduce_sum_i32_2D_keepdims; -mod reduce_sum_i32_2D_axis_1; -mod reduce_sum_i8_1D; -mod reduce_sum_i8_2D_default; -mod reduce_sum_i8_2D_keepdims; -mod reduce_sum_i8_2D_axis_1; -mod reduce_sum_u32_1D; -mod reduce_sum_u32_2D_default; -mod reduce_sum_u32_2D_keepdims; -mod reduce_sum_u32_2D_axis_1; mod relu_fp16x16; mod relu_fp8x23; mod relu_i32; @@ -1047,3 +1027,23 @@ mod label_encoder_fp8x23_default; mod label_encoder_i8_default; mod label_encoder_i32_default; mod label_encoder_u32_default; +mod reduce_sum_single_axis_fp16x16_1D; +mod reduce_sum_single_axis_fp16x16_2D_default; +mod reduce_sum_single_axis_fp16x16_2D_keepdims; +mod reduce_sum_single_axis_fp16x16_2D_axis_1; +mod reduce_sum_single_axis_fp8x23_1D; +mod reduce_sum_single_axis_fp8x23_2D_default; +mod reduce_sum_single_axis_fp8x23_2D_keepdims; +mod reduce_sum_single_axis_fp8x23_2D_axis_1; +mod reduce_sum_single_axis_i32_1D; +mod reduce_sum_single_axis_i32_2D_default; +mod reduce_sum_single_axis_i32_2D_keepdims; +mod reduce_sum_single_axis_i32_2D_axis_1; +mod reduce_sum_single_axis_i8_1D; +mod reduce_sum_single_axis_i8_2D_default; +mod reduce_sum_single_axis_i8_2D_keepdims; +mod reduce_sum_single_axis_i8_2D_axis_1; +mod reduce_sum_single_axis_u32_1D; +mod 
reduce_sum_single_axis_u32_2D_default; +mod reduce_sum_single_axis_u32_2D_keepdims; +mod reduce_sum_single_axis_u32_2D_axis_1; diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo new file mode 100644 index 000000000..b91251f21 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp16x16_1D() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo new file mode 100644 index 000000000..33e05815f --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo new file mode 100644 index 000000000..db089f852 --- /dev/null +++ 
b/tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo new file mode 100644 index 000000000..ab1792bf1 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp16x16_2D_axis_1() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(1, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo new file mode 100644 index 000000000..6a8b7cb09 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo new file mode 100644 index 000000000..11bc960a2 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo new file mode 100644 index 000000000..0faf8ca88 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp16x16_2D_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo 
b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo new file mode 100644 index 000000000..6a8b7cb09 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo new file mode 100644 index 000000000..d3b81daf0 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo new file mode 100644 index 000000000..4a587072e --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use 
orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp16x16_2D_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo new file mode 100644 index 000000000..6a8b7cb09 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo new file mode 100644 index 000000000..cadc8f1f9 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + 
+ let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo new file mode 100644 index 000000000..9640ee345 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp8x23_1D() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo new file mode 100644 index 000000000..0b5fd5c6a --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo new file mode 100644 index 000000000..713f0d28a --- /dev/null +++ 
b/tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 25165824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo new file mode 100644 index 000000000..84c6bf093 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp8x23_2D_axis_1() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(1, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo new file mode 100644 index 000000000..88c1db446 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { 
mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo new file mode 100644 index 000000000..4b3e55bbe --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 41943040, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo new file mode 100644 index 000000000..5b9eb0fb1 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp8x23_2D_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo new file 
mode 100644 index 000000000..88c1db446 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo new file mode 100644 index 000000000..12e4ce95f --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo new file mode 100644 index 000000000..0c4bede22 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, 
Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_fp8x23_2D_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo new file mode 100644 index 000000000..88c1db446 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo new file mode 100644 index 000000000..3435050a2 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 16777216, sign: false }); + 
data.append(FP8x23 { mag: 33554432, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_1D.cairo b/tests/nodes/reduce_sum_single_axis_i32_1D.cairo new file mode 100644 index 000000000..7619c0ee4 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_1D.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i32_1D() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo new file mode 100644 index 000000000..a16657995 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo new file mode 100644 index 000000000..d1b50c386 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo new file mode 100644 index 000000000..9cd61ef22 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i32_2D_axis_1() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(1, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo new file mode 100644 index 000000000..2a164a41a --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo new file mode 100644 index 000000000..b9cf7c45d --- /dev/null +++ 
b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(5); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo new file mode 100644 index 000000000..cb28aac41 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i32_2D_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo new file mode 100644 index 000000000..2a164a41a --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + 
TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo new file mode 100644 index 000000000..f1b1716d2 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(2); + data.append(4); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo new file mode 100644 index 000000000..390f3742c --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i32_2D_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo new file mode 100644 index 000000000..2a164a41a --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo new file mode 100644 index 000000000..cbcfa21d7 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(2); + data.append(4); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_1D.cairo b/tests/nodes/reduce_sum_single_axis_i8_1D.cairo new file mode 100644 index 000000000..7a3059a32 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_1D.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i8_1D() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo new file mode 100644 index 000000000..61b70cda3 --- /dev/null +++ 
b/tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 1, sign: false }); + data.append(FP8x23 { mag: 2, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo new file mode 100644 index 000000000..3a2fc6193 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 3, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo new file mode 100644 index 000000000..381c69ab2 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i8_2D_axis_1() { + let input_0 = input_0::input_0(); + let z_0 = 
output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(1, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo new file mode 100644 index 000000000..53f6405e1 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 1, sign: false }); + data.append(FP8x23 { mag: 2, sign: false }); + data.append(FP8x23 { mag: 3, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo new file mode 100644 index 000000000..26f2c2987 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 1, sign: false }); + data.append(FP8x23 { mag: 5, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo new file mode 100644 index 000000000..09d98cd79 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo @@ -0,0 +1,20 
@@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i8_2D_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo new file mode 100644 index 000000000..53f6405e1 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 1, sign: false }); + data.append(FP8x23 { mag: 2, sign: false }); + data.append(FP8x23 { mag: 3, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo new file mode 100644 index 000000000..de769f8c0 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); 
+ + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 2, sign: false }); + data.append(FP8x23 { mag: 4, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo new file mode 100644 index 000000000..2af82ece3 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_i8_2D_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo new file mode 100644 index 000000000..53f6405e1 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 1, sign: false }); + data.append(FP8x23 { mag: 2, sign: false }); + data.append(FP8x23 { mag: 3, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo 
b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo new file mode 100644 index 000000000..b45c5c277 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::numbers::{FixedTrait, FP8x23}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 2, sign: false }); + data.append(FP8x23 { mag: 4, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_1D.cairo b/tests/nodes/reduce_sum_single_axis_u32_1D.cairo new file mode 100644 index 000000000..e266d2e04 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_1D.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_u32_1D() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo new file mode 100644 index 000000000..a350d75c1 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); 
+ shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo new file mode 100644 index 000000000..bd3b7eea9 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo new file mode 100644 index 000000000..fcd06fb15 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_u32_2D_axis_1() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(1, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo new file mode 100644 index 000000000..530126fb5 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo new file mode 100644 index 000000000..1bee2ed67 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(5); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo new file mode 100644 index 000000000..312619beb --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_u32_2D_default() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo new file mode 
100644 index 000000000..530126fb5 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo new file mode 100644 index 000000000..5aeddf000 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(2); + data.append(4); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo new file mode 100644 index 000000000..124af02d5 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_single_axis_u32_2D_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = 
output_0::output_0(); + + let y_0 = input_0.reduce_sum_single_axis(0, true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo new file mode 100644 index 000000000..530126fb5 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(3); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo new file mode 100644 index 000000000..5b761b563 --- /dev/null +++ b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(2); + data.append(4); + TensorTrait::new(shape.span(), data.span()) +} From 8e66598527fc7829400bbf5337c165b42d6942fd Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 08:51:08 +0100 Subject: [PATCH 14/68] refactor reduce_sum --- nodegen/node/reduce_sum.py | 21 + src/operators/tensor/core.cairo | 5 +- .../tensor/implementations/tensor_bool.cairo | 5 +- .../implementations/tensor_complex64.cairo | 6 +- .../implementations/tensor_fp16x16.cairo | 6 +- 
.../implementations/tensor_fp16x16wide.cairo | 6 +- .../implementations/tensor_fp32x32.cairo | 6 +- .../implementations/tensor_fp64x64.cairo | 6 +- .../implementations/tensor_fp8x23.cairo | 7 +- .../implementations/tensor_fp8x23wide.cairo | 6 +- .../tensor/implementations/tensor_i32.cairo | 7 +- .../tensor/implementations/tensor_i8.cairo | 11 +- .../tensor/implementations/tensor_u32.cairo | 7 +- src/operators/tensor/math.cairo | 1 + .../tensor/math/layer_normalization.cairo | 2 +- src/operators/tensor/math/reduce_l1.cairo | 2 +- src/operators/tensor/math/reduce_l2.cairo | 2 +- .../tensor/math/reduce_log_sum.cairo | 2 +- src/operators/tensor/math/reduce_sum.cairo | 192 +- .../tensor/math/reduce_sum_single_axis.cairo | 106 + .../tensor/math/reduce_sum_square.cairo | 2 +- tests/lib.cairo | 10 +- tests/nodes.cairo | 2099 +++++++++-------- tests/nodes/reduce_sum_keep_dims.cairo | 20 + .../nodes/reduce_sum_keep_dims/input_0.cairo | 26 + .../nodes/reduce_sum_keep_dims/output_0.cairo | 20 + 26 files changed, 1393 insertions(+), 1190 deletions(-) create mode 100644 nodegen/node/reduce_sum.py create mode 100644 src/operators/tensor/math/reduce_sum_single_axis.cairo create mode 100644 tests/nodes/reduce_sum_keep_dims.cairo create mode 100644 tests/nodes/reduce_sum_keep_dims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_keep_dims/output_0.cairo diff --git a/nodegen/node/reduce_sum.py b/nodegen/node/reduce_sum.py new file mode 100644 index 000000000..4f7a213c8 --- /dev/null +++ b/nodegen/node/reduce_sum.py @@ -0,0 +1,21 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl + + +class Reduce_sum(RunAll): + @staticmethod + def reduce_sum_keep_dims(): + axes = np.array([1], dtype=np.uint32) + keepdims = 1 + + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) + + x = Tensor(Dtype.U32, x.shape, 
x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_keep_dims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), true, false)", name) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 2db4cae78..cb140bd70 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -676,7 +676,10 @@ trait TensorTrait { /// ``` /// fn reduce_sum( - self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor; /// ## tensor.reduce_sum /// diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index e8f1750a1..53b5e3060 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -65,7 +65,10 @@ impl BoolTensor of TensorTrait { } fn reduce_sum( - self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 2b9bc8e53..0cba48505 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -76,8 +76,8 @@ impl Complex64Tensor of TensorTrait { fn reduce_sum( self: @Tensor, axes: Option>, - keepdims: bool, - noop_with_empty_axes: bool + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -86,7 +86,7 @@ impl Complex64Tensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) 
+ math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index aa5c28e38..405f665e1 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -78,8 +78,8 @@ impl FP16x16Tensor of TensorTrait { fn reduce_sum( self: @Tensor, axes: Option>, - keepdims: bool, - noop_with_empty_axes: bool + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -87,7 +87,7 @@ impl FP16x16Tensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index acda2e64b..01057cbf2 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -82,8 +82,8 @@ impl FP16x16WTensor of TensorTrait { fn reduce_sum( self: @Tensor, axes: Option>, - keepdims: bool, - noop_with_empty_axes: bool + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -91,7 +91,7 @@ impl FP16x16WTensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git 
a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 2618bfe88..cfdb06536 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -75,8 +75,8 @@ impl FP32x32Tensor of TensorTrait { fn reduce_sum( self: @Tensor, axes: Option>, - keepdims: bool, - noop_with_empty_axes: bool + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -85,7 +85,7 @@ impl FP32x32Tensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index e99f5647c..a33e549b5 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -75,8 +75,8 @@ impl FP64x64Tensor of TensorTrait { fn reduce_sum( self: @Tensor, axes: Option>, - keepdims: bool, - noop_with_empty_axes: bool + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -84,7 +84,7 @@ impl FP64x64Tensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 
61beb1cae..d1fcc5477 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -73,7 +73,10 @@ impl FP8x23Tensor of TensorTrait { } fn reduce_sum( - self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -82,7 +85,7 @@ impl FP8x23Tensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 76657ff4d..c061dff21 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -78,8 +78,8 @@ impl FP8x23WTensor of TensorTrait { fn reduce_sum( self: @Tensor, axes: Option>, - keepdims: bool, - noop_with_empty_axes: bool + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } @@ -88,7 +88,7 @@ impl FP8x23WTensor of TensorTrait { fn reduce_sum_single_axis( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 8c2ef8cb5..e04b4e8b5 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -73,13 +73,16 @@ impl I32Tensor of TensorTrait { } fn reduce_sum( - self: @Tensor, axes: 
Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 2fdf7cdf2..e9f076d9d 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -71,15 +71,16 @@ impl I8Tensor of TensorTrait { } fn reduce_sum( - self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 5cb4505cd..b8264345a 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -70,13 +70,16 @@ impl U32Tensor of TensorTrait { } fn reduce_sum( - self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + 
noop_with_empty_axes: Option ) -> Tensor { math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum_single_axis(self, axis, keepdims) + math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index b73f6d102..fb4813ae1 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -3,6 +3,7 @@ mod min; mod max_in_tensor; mod max; mod reduce_sum; +mod reduce_sum_single_axis; mod reduce_prod; mod argmax; mod argmin; diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 5756c2296..f185b6bb5 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -4,7 +4,7 @@ use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; -use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; +use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; /// Cf: TensorTrait::layer_normalization docstring fn layer_normalization< diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo index 329b67494..8322af094 100644 --- a/src/operators/tensor/math/reduce_l1.cairo +++ b/src/operators/tensor/math/reduce_l1.cairo @@ -1,7 +1,7 @@ use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; +use 
orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; /// Cf: TensorTrait::reduce_sum docstring fn reduce_l1< diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index 1fc798096..cf5279df2 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -3,7 +3,7 @@ use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; +use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; fn square< T, diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo index 2149b72b9..8911b1e04 100644 --- a/src/operators/tensor/math/reduce_log_sum.cairo +++ b/src/operators/tensor/math/reduce_log_sum.cairo @@ -1,7 +1,7 @@ use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; +use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; /// Cf: TensorTrait::reduce_sum_square docstring fn reduce_log_sum< diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index c5c0293d7..66d5aea5b 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,111 +1,111 @@ -use core::array::SpanTrait; -use core::option::OptionTrait; +use alexandria_sorting::bubble_sort; +use alexandria_data_structures::array_ext::{SpanTraitExt}; + +use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, 
unravel_index}; -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; - +use orion::operators::tensor::helpers::{ + reduce_output_shape, len_from_shape, combine_indices, get_all_axes +}; +/// Cf: TensorTrait::reduce_sum docstring fn reduce_sum< T, MAG, impl TTensor: TensorTrait, impl TNumber: NumberTrait, - impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop >( - self: @Tensor, axes: Option>, keepdims: bool, noop_with_empty_axes: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { - // Handle case when no reduction is needed - if noop_with_empty_axes && (axes.is_none() || axes.unwrap().is_empty()) { - return *self; - } - - let reducer_len = if let Option::Some(axes) = axes { - axes.len() - } else { - (*self.shape).len() + let noop_with_empty_axes = match noop_with_empty_axes { + Option::Some(noop_with_empty_axes) => noop_with_empty_axes, + Option::None => false, + }; + let axes = match axes { + Option::Some(axes) => { + if (axes.len() == 0) { + get_all_axes(*self.shape) + } else { + assert(axes.len() == axes.unique().len(), 'duplicated axis.'); + let mut axes_arr: Array = array![]; + let mut copy_axes = axes; + loop { + match copy_axes.pop_front() { + Option::Some(axis) => { axes_arr.append(*axis); }, + Option::None => { break; } + }; + }; + let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span(); + sorted_axes + } + }, + Option::None => { + if noop_with_empty_axes { + return *self; + } + get_all_axes(*self.shape) + }, + }; + let keepdims = match keepdims { + Option::Some(keepdims) => keepdims, + Option::None => true, }; - let mut result_tensor = *self; - let mut axis_index = 0; - while axis_index < reducer_len { - let axis = if let Option::Some(axes) = axes { - *axes.at(axis_index) - } else { - axis_index - }; - result_tensor = - { - let mut output_data: Array = array![]; - let output_shape = reduce_output_shape(result_tensor.shape, axis, keepdims); 
- let output_data_len = len_from_shape(output_shape); + let mut axis_c = 0; + let mut copy_axes = axes; + let mut shape = *self.shape; + let mut data = *self.data; + loop { + match copy_axes.pop_front() { + Option::Some(axis) => { + if (shape.len() == 1) { + let current_sum = accumulate_sum::(data, shape, shape, 0); + shape = array![].span(); + data = array![current_sum].span(); + break (); + } + let mut temp_data = array![]; + let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false); + let data_len = len_from_shape(temp_shape); let mut index: usize = 0; + while index != data_len { + let indices = unravel_index(index, temp_shape); + let current_sum = accumulate_sum::(data, shape, indices, *axis - axis_c); + + temp_data.append(current_sum); - while index != output_data_len { - let output_indices = unravel_index(index, output_shape); - let current_sum = accumulate_sum::< - T - >(result_tensor.data, result_tensor.shape, output_indices, axis); - output_data.append(current_sum); index += 1; }; - TensorTrait::::new(output_shape, output_data.span()) - }; - axis_index += 1; + shape = temp_shape; + data = temp_data.span(); + axis_c += 1; + }, + Option::None => { break; } + }; }; - result_tensor -} - -fn reduce_sum_single_axis< - T, - MAG, - impl TTensor: TensorTrait, - impl TNumber: NumberTrait, - impl TAddEq: AddEq, - impl TCopy: Copy, - impl TDrop: Drop ->( - self: @Tensor, axis: usize, keepdims: bool -) -> Tensor { - let mut output_data: Array = array![]; - - if (*self.shape).len() == 1 { - assert(axis == 0, 'axis out of dimensions'); - let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); - output_data.append(current_sum); - - let mut output_shape: Array = array![]; - output_shape.append(1); - - return TensorTrait::new(output_shape.span(), output_data.span()); - } else { - assert(axis <= (*self.shape).len(), 'axis out of dimensions'); - let output_shape = reduce_output_shape(*self.shape, axis, false); - let output_data_len = 
len_from_shape(output_shape); - let mut index: usize = 0; - while index != output_data_len { - let output_indices = unravel_index(index, output_shape); - let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); - - output_data.append(current_sum); - - index += 1; + let mut axes_copy = axes; + if keepdims { + shape = *self.shape; + loop { + match axes_copy.pop_front() { + Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); }, + Option::None => { break; } + }; }; - if keepdims { - let output_shape = reduce_output_shape(*self.shape, axis, true); - - TensorTrait::::new(output_shape, output_data.span()) - } else { - TensorTrait::::new(output_shape, output_data.span()) - } + TensorTrait::::new(shape, data) + } else { + TensorTrait::::new(shape, data) } } - /// Helper function that accumulates the sum of elements along a specific axis. /// /// # Arguments @@ -118,43 +118,35 @@ fn reduce_sum_single_axis< /// * Panics if gas limit is exceeded during execution. /// /// # Returns -/// * An i32 value representing the accumulated sum along the specified axis. +/// * A value representing the accumulated sum along the specified axis. 
fn accumulate_sum< - T, - MAG, - impl TNumber: NumberTrait, - impl TAddEq: AddEq, - impl TCopy: Copy, - impl TDrop: Drop + T, MAG, impl TNumber: NumberTrait, + impl TCopy: Copy, impl TDrop: Drop >( mut input_data: Span, input_shape: Span, output_indices: Span, axis: usize ) -> T { let axis_len = *(input_shape)[axis]; - let mut acc: T = NumberTrait::zero(); + let mut sum: T = NumberTrait::zero(); - let mut axis_index: usize = 0; + let mut axis_index = 0; if (input_shape).len() > 1 { - loop { - if axis_index == axis_len { - break (); - } - + while axis_index != axis_len { let input_indices = combine_indices(output_indices, axis_index, axis); let input_index = ravel_index(input_shape, input_indices); let ele = *(input_data)[input_index]; - acc += ele; + sum = NumberTrait::add(sum, ele); + axis_index += 1; }; } else { loop { match input_data.pop_front() { - Option::Some(item) => { acc += *item; }, + Option::Some(item) => sum = NumberTrait::add(sum, *item), Option::None => { break; } }; }; } - return acc; + sum } - diff --git a/src/operators/tensor/math/reduce_sum_single_axis.cairo b/src/operators/tensor/math/reduce_sum_single_axis.cairo new file mode 100644 index 000000000..75ceb8bf8 --- /dev/null +++ b/src/operators/tensor/math/reduce_sum_single_axis.cairo @@ -0,0 +1,106 @@ +use core::array::SpanTrait; +use core::option::OptionTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; + + +fn reduce_sum_single_axis< + T, + MAG, + impl TTensor: TensorTrait, + impl TNumber: NumberTrait, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop +>( + self: @Tensor, axis: usize, keepdims: bool +) -> Tensor { + let mut output_data: Array = array![]; + + if (*self.shape).len() == 1 { + assert(axis == 0, 'axis out of dimensions'); + let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, 
axis); + output_data.append(current_sum); + + let mut output_shape: Array = array![]; + output_shape.append(1); + + return TensorTrait::new(output_shape.span(), output_data.span()); + } else { + assert(axis <= (*self.shape).len(), 'axis out of dimensions'); + let output_shape = reduce_output_shape(*self.shape, axis, false); + let output_data_len = len_from_shape(output_shape); + let mut index: usize = 0; + while index != output_data_len { + let output_indices = unravel_index(index, output_shape); + let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); + + output_data.append(current_sum); + + index += 1; + }; + + if keepdims { + let output_shape = reduce_output_shape(*self.shape, axis, true); + + TensorTrait::::new(output_shape, output_data.span()) + } else { + TensorTrait::::new(output_shape, output_data.span()) + } + } +} + + +/// Helper function that accumulates the sum of elements along a specific axis. +/// +/// # Arguments +/// * `input_data` - The input's data. +/// * `input_shape` - The input's shape. +/// * `output_indices` - A span of output indices. +/// * `axis` - The axis along which to accumulate the sum. +/// +/// # Panics +/// * Panics if gas limit is exceeded during execution. +/// +/// # Returns +/// * An i32 value representing the accumulated sum along the specified axis. 
+fn accumulate_sum< + T, + MAG, + impl TNumber: NumberTrait, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop +>( + mut input_data: Span, input_shape: Span, output_indices: Span, axis: usize +) -> T { + let axis_len = *(input_shape)[axis]; + let mut acc: T = NumberTrait::zero(); + + let mut axis_index: usize = 0; + + if (input_shape).len() > 1 { + loop { + if axis_index == axis_len { + break (); + } + + let input_indices = combine_indices(output_indices, axis_index, axis); + let input_index = ravel_index(input_shape, input_indices); + let ele = *(input_data)[input_index]; + acc += ele; + axis_index += 1; + }; + } else { + loop { + match input_data.pop_front() { + Option::Some(item) => { acc += *item; }, + Option::None => { break; } + }; + }; + } + + return acc; +} + diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index 3f4d2e6c4..3b9cd5e2b 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -1,7 +1,7 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::math::reduce_sum::reduce_sum_single_axis; +use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; fn square< T, diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..a61287d92 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -mod numbers; -mod performance; -mod tensor_core; +// mod numbers; +// mod performance; +// mod tensor_core; mod nodes; -mod ml; -mod operators; +// mod ml; +// mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 28948e5c5..044698c62 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1049 +1,1050 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; 
-mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod 
argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod 
concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod 
less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; 
-mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod 
unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod 
max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; 
-mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod 
reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod 
gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod 
reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod 
concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod 
resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -mod reduce_log_sum_exp_fp32x32_export_keepdims; -mod 
reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; -mod col2im_strides; -mod random_uniform_like_fp16x16; -mod random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod 
split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; -mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod 
scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; -mod label_encoder_fp16x16_3d_default; -mod label_encoder_fp8x23_default; -mod label_encoder_i8_default; -mod label_encoder_i32_default; -mod label_encoder_u32_default; -mod reduce_sum_single_axis_fp16x16_1D; -mod reduce_sum_single_axis_fp16x16_2D_default; -mod reduce_sum_single_axis_fp16x16_2D_keepdims; -mod reduce_sum_single_axis_fp16x16_2D_axis_1; -mod reduce_sum_single_axis_fp8x23_1D; -mod reduce_sum_single_axis_fp8x23_2D_default; -mod reduce_sum_single_axis_fp8x23_2D_keepdims; -mod reduce_sum_single_axis_fp8x23_2D_axis_1; -mod reduce_sum_single_axis_i32_1D; -mod reduce_sum_single_axis_i32_2D_default; -mod reduce_sum_single_axis_i32_2D_keepdims; -mod reduce_sum_single_axis_i32_2D_axis_1; -mod reduce_sum_single_axis_i8_1D; -mod reduce_sum_single_axis_i8_2D_default; -mod reduce_sum_single_axis_i8_2D_keepdims; -mod reduce_sum_single_axis_i8_2D_axis_1; -mod reduce_sum_single_axis_u32_1D; -mod reduce_sum_single_axis_u32_2D_default; -mod reduce_sum_single_axis_u32_2D_keepdims; -mod reduce_sum_single_axis_u32_2D_axis_1; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// mod add_u32; +// mod add_u32_broadcast; +// mod 
argmax_fp16x16_1D_default; +// mod argmax_fp16x16_1D_keepdims_false; +// mod argmax_fp16x16_1D_last_index; +// mod argmax_fp16x16_2D_default; +// mod argmax_fp16x16_2D_keepdims_false; +// mod argmax_fp16x16_2D_last_index; +// mod argmax_fp16x16_3D_default; +// mod argmax_fp16x16_3D_keepdims_false; +// mod argmax_fp16x16_3D_last_index; +// mod argmax_fp8x23_1D_default; +// mod argmax_fp8x23_1D_keepdims_false; +// mod argmax_fp8x23_1D_last_index; +// mod argmax_fp8x23_2D_default; +// mod argmax_fp8x23_2D_keepdims_false; +// mod argmax_fp8x23_2D_last_index; +// mod argmax_fp8x23_3D_default; +// mod argmax_fp8x23_3D_keepdims_false; +// mod argmax_fp8x23_3D_last_index; +// mod argmax_i32_1D_default; +// mod argmax_i32_1D_keepdims_false; +// mod argmax_i32_1D_last_index; +// mod argmax_i32_2D_default; +// mod argmax_i32_2D_keepdims_false; +// mod argmax_i32_2D_last_index; +// mod argmax_i32_3D_default; +// mod argmax_i32_3D_keepdims_false; +// mod argmax_i32_3D_last_index; +// mod argmax_i8_1D_default; +// mod argmax_i8_1D_keepdims_false; +// mod argmax_i8_1D_last_index; +// mod argmax_i8_2D_default; +// mod argmax_i8_2D_keepdims_false; +// mod argmax_i8_2D_last_index; +// mod argmax_i8_3D_default; +// mod argmax_i8_3D_keepdims_false; +// mod argmax_i8_3D_last_index; +// mod argmax_u32_1D_default; +// mod argmax_u32_1D_keepdims_false; +// mod argmax_u32_1D_last_index; +// mod argmax_u32_2D_default; +// mod argmax_u32_2D_keepdims_false; +// mod argmax_u32_2D_last_index; +// mod argmax_u32_3D_default; +// mod argmax_u32_3D_keepdims_false; +// mod argmax_u32_3D_last_index; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod 
argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; +// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod 
concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; +// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// 
mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; +// mod less_equal_i32_broadcast; +// mod less_equal_i8; +// mod less_equal_i8_broadcast; +// mod less_equal_u32; +// mod less_equal_u32_broadcast; +// mod greater_fp16x16; +// mod greater_fp16x16_broadcast; +// mod greater_fp8x23; +// mod greater_fp8x23_broadcast; +// mod greater_i32; +// mod greater_i32_broadcast; +// mod greater_i8; +// mod greater_i8_broadcast; +// mod greater_u32; +// mod greater_u32_broadcast; +// mod leaky_relu_fp16x16; +// mod leaky_relu_fp8x23; +// mod linear_fp16x16; +// mod linear_fp8x23; +// mod linear_i32; +// mod linear_i8; +// mod linear_u32; +// mod log_fp16x16; +// mod log_fp8x23; +// mod logsoftmax_fp16x16_axis_0; +// mod logsoftmax_fp16x16_axis_1; +// mod logsoftmax_fp8x23_axis_0; +// mod logsoftmax_fp8x23_axis_1; +// mod matmul_fp16x16_1d; +// mod matmul_fp16x16_2x2; +// mod matmul_fp16x16_2x1; +// mod matmul_fp16x16_1x2; +// mod matmul_fp8x23_1d; +// mod matmul_fp8x23_2x2; +// mod matmul_fp8x23_2x1; +// mod matmul_fp8x23_1x2; +// mod matmul_i32_1d; +// mod matmul_i32_2x2; +// mod matmul_i32_2x1; +// mod matmul_i32_1x2; +// mod matmul_i8_1d; +// mod matmul_i8_2x2; +// mod matmul_i8_2x1; +// mod matmul_i8_1x2; +// mod matmul_u32_1d; +// mod matmul_u32_2x2; +// mod matmul_u32_2x1; +// mod matmul_u32_1x2; +// mod mul_fp16x16; +// mod mul_fp16x16_broadcast; +// mod mul_fp8x23; +// mod mul_fp8x23_broadcast; +// mod mul_i32; +// mod mul_i32_broadcast; +// mod mul_i8; +// mod mul_i8_broadcast; +// mod mul_u32; +// mod mul_u32_broadcast; +// mod or_fp16x16; +// mod or_fp16x16_broadcast; +// mod or_fp8x23; +// mod or_fp8x23_broadcast; +// mod or_i32; +// mod or_i32_broadcast; +// mod or_i8; +// mod 
or_i8_broadcast; +// mod or_u32; +// mod or_u32_broadcast; +// mod relu_fp16x16; +// mod relu_fp8x23; +// mod relu_i32; +// mod relu_i8; +// mod sigmoid_fp16x16; +// mod sigmoid_fp8x23; +// mod sin_fp16x16; +// mod sin_fp8x23; +// mod sinh_fp16x16; +// mod sinh_fp8x23; +// mod softmax_fp16x16; +// mod softmax_fp8x23; +// mod softplus_fp8x23; +// mod softplus_fp16x16; +// mod softsign_fp8x23; +// mod softsign_fp16x16; +// mod sqrt_fp16x16; +// mod sqrt_fp8x23; +// mod sub_fp16x16; +// mod sub_fp16x16_broadcast; +// mod sub_fp8x23; +// mod sub_fp8x23_broadcast; +// mod sub_i32; +// mod sub_i32_broadcast; +// mod sub_i8; +// mod sub_i8_broadcast; +// mod sub_u32; +// mod sub_u32_broadcast; +// mod tanh_fp16x16; +// mod tanh_fp8x23; +// mod transpose_fp16x16_2d; +// mod transpose_fp16x16_3d; +// mod transpose_fp8x23_2d; +// mod transpose_fp8x23_3d; +// mod transpose_i32_2d; +// mod transpose_i32_3d; +// mod transpose_i8_2d; +// mod transpose_i8_3d; +// mod transpose_u32_2d; +// mod transpose_u32_3d; +// mod xor_fp16x16; +// mod xor_fp16x16_broadcast; +// mod xor_fp8x23; +// mod xor_fp8x23_broadcast; +// mod xor_i32; +// mod xor_i32_broadcast; +// mod xor_i8; +// mod xor_i8_broadcast; +// mod xor_u32; +// mod xor_u32_broadcast; +// mod less_fp16x16; +// mod less_fp16x16_broadcast; +// mod less_fp8x23; +// mod less_fp8x23_broadcast; +// mod less_i32; +// mod less_i32_broadcast; +// mod less_i8; +// mod less_i8_broadcast; +// mod less_u32; +// mod less_u32_broadcast; +// mod greater_equal_fp16x16; +// mod greater_equal_fp16x16_broadcast; +// mod greater_equal_fp8x23; +// mod greater_equal_fp8x23_broadcast; +// mod greater_equal_i32; +// mod greater_equal_i32_broadcast; +// mod greater_equal_i8; +// mod greater_equal_i8_broadcast; +// mod greater_equal_u32; +// mod greater_equal_u32_broadcast; +// mod slice_fp16x16_2d; +// mod slice_fp16x16_3d; +// mod slice_fp8x23_2d; +// mod slice_fp8x23_3d; +// mod slice_i32_2d; +// mod slice_i32_3d; +// mod slice_i8_2d; +// mod 
slice_i8_3d; +// mod slice_u32_2d; +// mod slice_u32_3d; +// mod gather_fp8x23_3d_default; +// mod gather_fp8x23_3d_axis1; +// mod gather_fp8x23_3d_axis2; +// mod gather_fp16x16_3d_default; +// mod gather_fp16x16_3d_axis1; +// mod gather_fp16x16_3d_axis2; +// mod gather_i8_3d_default; +// mod gather_i8_3d_axis1; +// mod gather_i8_3d_axis2; +// mod gather_i32_3d_default; +// mod gather_i32_3d_axis1; +// mod gather_i32_3d_axis2; +// mod gather_u32_3d_default; +// mod gather_u32_3d_axis1; +// mod gather_u32_3d_axis2; +// mod nonzero_fp16x16_2d; +// mod nonzero_fp16x16_3d; +// mod nonzero_fp8x23_2d; +// mod nonzero_fp8x23_3d; +// mod nonzero_i32_2d; +// mod nonzero_i32_3d; +// mod nonzero_i8_2d; +// mod nonzero_i8_3d; +// mod nonzero_u32_2d; +// mod nonzero_u32_3d; +// mod squeeze_fP16x16; +// mod squeeze_fP8x23; +// mod squeeze_i32; +// mod squeeze_i8; +// mod squeeze_u32; +// mod unsqueeze_fp16x16_2d; +// mod unsqueeze_fp16x16_3d; +// mod unsqueeze_fp8x23_2d; +// mod unsqueeze_fp8x23_3d; +// mod unsqueeze_i32_2d; +// mod unsqueeze_i32_3d; +// mod unsqueeze_i8_2d; +// mod unsqueeze_i8_3d; +// mod unsqueeze_u32_2d; +// mod unsqueeze_u32_3d; +// mod sign_fP16x16; +// mod sign_fP8x23; +// mod sign_fail; +// mod sign_i32; +// mod sign_i8; +// mod clip_fp16x16_2d; +// mod clip_fp16x16_3d; +// mod clip_fp8x23_2d; +// mod clip_fp8x23_3d; +// mod clip_i32_2d; +// mod clip_i32_3d; +// mod clip_i8_2d; +// mod clip_i8_3d; +// mod clip_u32_2d; +// mod clip_u32_3d; +// mod identity_fP16x16; +// mod identity_fP8x23; +// mod identity_i32; +// mod identity_i8; +// mod identity_u32; +// mod thresholded_relu_fp16x16; +// mod thresholded_relu_fp8x23; +// mod hard_sigmoid_fp8x23; +// mod hard_sigmoid_fp16x16; +// mod neg_fp16x16; +// mod neg_fp8x23; +// mod neg_i32; +// mod neg_i8; +// mod gemm_all_attributes; +// mod gemm_alpha; +// mod gemm_beta; +// mod gemm_default_matrix_bias; +// mod gemm_default_vector_bias; +// mod gemm_default_no_bias; +// mod gemm_transposeA; +// mod 
gemm_transposeB; +// mod min_fp16x16_three_tensors; +// mod min_fp16x16_broadcast_three_tensors; +// mod min_fp16x16_two_tensors; +// mod min_fp16x16_broadcast_two_tensors; +// mod min_fp8x23_three_tensors; +// mod min_fp8x23_broadcast_three_tensors; +// mod min_fp8x23_two_tensors; +// mod min_fp8x23_broadcast_two_tensors; +// mod min_i32_three_tensors; +// mod min_i32_broadcast_three_tensors; +// mod min_i32_two_tensors; +// mod min_i32_broadcast_two_tensors; +// mod min_i8_three_tensors; +// mod min_i8_broadcast_three_tensors; +// mod min_i8_two_tensors; +// mod min_i8_broadcast_two_tensors; +// mod min_u32_three_tensors; +// mod min_u32_broadcast_three_tensors; +// mod min_u32_two_tensors; +// mod min_u32_broadcast_two_tensors; +// mod where_fp16x16; +// mod where_fp16x16_broadcast; +// mod where_fp8x23; +// mod where_fp8x23_broadcast; +// mod where_i32; +// mod where_i32_broadcast; +// mod where_i8; +// mod where_i8_broadcast; +// mod where_u32; +// mod where_u32_broadcast; +// mod not_bool; +// mod round_fp16x16; +// mod round_fp8x23; +// mod max_fp16x16_three_tensors; +// mod max_fp16x16_broadcast_three_tensors; +// mod max_fp16x16_two_tensors; +// mod max_fp16x16_broadcast_two_tensors; +// mod max_fp8x23_three_tensors; +// mod max_fp8x23_broadcast_three_tensors; +// mod max_fp8x23_two_tensors; +// mod max_fp8x23_broadcast_two_tensors; +// mod max_i32_three_tensors; +// mod max_i32_broadcast_three_tensors; +// mod max_i32_two_tensors; +// mod max_i32_broadcast_two_tensors; +// mod max_i8_three_tensors; +// mod max_i8_broadcast_three_tensors; +// mod max_i8_two_tensors; +// mod max_i8_broadcast_two_tensors; +// mod max_u32_three_tensors; +// mod max_u32_broadcast_three_tensors; +// mod max_u32_two_tensors; +// mod max_u32_broadcast_two_tensors; +// mod scatter_fp16x16_3d_default; +// mod scatter_fp16x16_3d_axis1; +// mod scatter_fp16x16_3d_axis1_add; +// mod scatter_fp8x23_default; +// mod scatter_fp8x23_axis1; +// mod scatter_fp8x23_mul; +// mod 
scatter_i8_default; +// mod scatter_i8_axis1; +// mod scatter_i8_axis1_max; +// mod scatter_u32_default; +// mod scatter_u32_axis1; +// mod scatter_u32_add; +// mod array_feature_extractor_1D_i32; +// mod array_feature_extractor_1D_fp8x23; +// mod array_feature_extractor_1D_fp16x16; +// mod array_feature_extractor_2D_i32; +// mod array_feature_extractor_2D_fp8x23; +// mod array_feature_extractor_2D_fp16x16; +// mod array_feature_extractor_3D_i32; +// mod array_feature_extractor_3D_fp8x23; +// mod array_feature_extractor_3D_fp16x16; +// mod binarizer_fp16x16; +// mod binarizer_fp8x23; +// mod tril_fp16x16; +// mod tril_fp16x16_neg; +// mod tril_fp16x16_one_row; +// mod tril_fp16x16_out_neg; +// mod tril_fp16x16_out_pos; +// mod tril_fp16x16_pos; +// mod tril_fp16x16_square; +// mod tril_fp16x16_square_neg; +// mod tril_fp16x16_zero; +// mod triu_fp16x16; +// mod triu_fp16x16_neg; +// mod triu_fp16x16_one_row; +// mod triu_fp16x16_out_neg; +// mod triu_fp16x16_out_pos; +// mod triu_fp16x16_pos; +// mod triu_fp16x16_square; +// mod triu_fp16x16_square_neg; +// mod triu_fp16x16_zero; +// mod tril_fp8x23; +// mod tril_fp8x23_neg; +// mod tril_fp8x23_one_row; +// mod tril_fp8x23_out_neg; +// mod tril_fp8x23_out_pos; +// mod tril_fp8x23_pos; +// mod tril_fp8x23_square; +// mod tril_fp8x23_square_neg; +// mod tril_fp8x23_zero; +// mod triu_fp8x23; +// mod triu_fp8x23_neg; +// mod triu_fp8x23_one_row; +// mod triu_fp8x23_out_neg; +// mod triu_fp8x23_out_pos; +// mod triu_fp8x23_pos; +// mod triu_fp8x23_square; +// mod triu_fp8x23_square_neg; +// mod triu_fp8x23_zero; +// mod tril_i32; +// mod tril_neg_i32; +// mod tril_i32_one_row; +// mod tril_i32_out_neg; +// mod tril_i32_out_pos; +// mod tril_i32_pos; +// mod tril_i32_square; +// mod tril_i32_square_neg; +// mod tril_i32_zero; +// mod triu_i32; +// mod triu_i32_neg; +// mod triu_i32_one_row; +// mod triu_i32_out_neg; +// mod triu_i32_out_pos; +// mod triu_i32_pos; +// mod triu_i32_square; +// mod triu_i32_square_neg; +// 
mod triu_i32_zero; +// mod tril_i8; +// mod tril_i8_neg; +// mod tril_i8_one_row; +// mod tril_i8_out_neg; +// mod tril_i8_out_pos; +// mod tril_i8_pos; +// mod tril_i8_square; +// mod tril_i8_square_neg; +// mod tril_i8_zero; +// mod triu_i8; +// mod triu_i8_neg; +// mod triu_i8_one_row; +// mod triu_i8_out_neg; +// mod triu_i8_out_pos; +// mod triu_i8_pos; +// mod triu_i8_square; +// mod triu_i8_square_neg; +// mod triu_i8_zero; +// mod tril_u32; +// mod tril_u32_neg; +// mod tril_u32_one_row; +// mod tril_u32_out_neg; +// mod tril_u32_out_pos; +// mod tril_u32_pos; +// mod tril_u32_square; +// mod tril_u32_square_neg; +// mod tril_u32_zero; +// mod triu_u32; +// mod triu_u32_neg; +// mod triu_u32_one_row; +// mod triu_u32_out_neg; +// mod triu_u32_out_pos; +// mod triu_u32_pos; +// mod triu_u32_square; +// mod triu_u32_square_neg; +// mod triu_u32_zero; +// mod reduce_sum_square_fp16x16_export_do_not_keepdims; +// mod reduce_sum_square_fp16x16_export_keepdims; +// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +// mod reduce_sum_square_fp8x23_export_do_not_keepdims; +// mod reduce_sum_square_fp8x23_export_keepdims; +// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +// mod reduce_sum_square_i32_export_do_not_keepdims; +// mod reduce_sum_square_i32_export_keepdims; +// mod reduce_sum_square_i32_export_negative_axes_keepdims; +// mod reduce_sum_square_i8_export_do_not_keepdims; +// mod reduce_sum_square_i8_export_keepdims; +// mod reduce_sum_square_i8_export_negative_axes_keepdims; +// mod reduce_sum_square_u32_export_do_not_keepdims; +// mod reduce_sum_square_u32_export_keepdims; +// mod reduce_sum_square_u32_export_negative_axes_keepdims; +// mod reduce_l2_fp16x16_export_do_not_keepdims; +// mod reduce_l2_fp16x16_export_keepdims; +// mod reduce_l2_fp16x16_export_negative_axes_keepdims; +// mod reduce_l2_fp8x23_export_do_not_keepdims; +// mod reduce_l2_fp8x23_export_keepdims; +// mod reduce_l2_fp8x23_export_negative_axes_keepdims; +// 
mod reduce_l1_fp16x16_export_do_not_keepdims; +// mod reduce_l1_fp16x16_export_keepdims; +// mod reduce_l1_fp16x16_export_negative_axes_keepdims; +// mod reduce_l1_fp8x23_export_do_not_keepdims; +// mod reduce_l1_fp8x23_export_keepdims; +// mod reduce_l1_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_i32_export_do_not_keepdims; +// mod reduce_l1_i32_export_keepdims; +// mod reduce_l1_i32_export_negative_axes_keepdims; +// mod reduce_l1_i8_export_do_not_keepdims; +// mod reduce_l1_i8_export_keepdims; +// mod reduce_l1_i8_export_negative_axes_keepdims; +// mod reduce_l1_u32_export_do_not_keepdims; +// mod reduce_l1_u32_export_keepdims; +// mod reduce_l1_u32_export_negative_axes_keepdims; +// mod reduce_prod_fp16x16_1D; +// mod reduce_prod_fp16x16_2D_default; +// mod reduce_prod_fp16x16_2D_keepdims; +// mod reduce_prod_fp16x16_2D_axis_1; +// mod reduce_prod_fp8x23_1D; +// mod reduce_prod_fp8x23_2D_default; +// mod reduce_prod_fp8x23_2D_keepdims; +// mod reduce_prod_fp8x23_2D_axis_1; +// mod reduce_prod_i32_1D; +// mod reduce_prod_i32_2D_default; +// mod reduce_prod_i32_2D_keepdims; +// mod reduce_prod_i32_2D_axis_1; +// mod reduce_prod_i8_1D; +// mod reduce_prod_i8_2D_default; +// mod reduce_prod_i8_2D_keepdims; +// mod reduce_prod_i8_2D_axis_1; +// mod reduce_prod_u32_1D; +// mod reduce_prod_u32_2D_default; +// mod reduce_prod_u32_2D_keepdims; +// mod reduce_prod_u32_2D_axis_1; +// mod gather_elements_fp16x16_3d_default; +// mod gather_elements_fp16x16_3d_axis1; +// mod gather_elements_fp16x16_3d_axis2; +// mod gather_elements_fp8x23_3d_default; +// mod gather_elements_fp8x23_3d_axis1; +// mod gather_elements_fp8x23_3d_axis2; +// mod gather_elements_i8_3d_default; +// mod gather_elements_i8_3d_axis1; +// mod gather_elements_i32_3d_default; +// mod gather_elements_i32_3d_axis1; +// mod gather_elements_i32_3d_axis2; +// mod gather_elements_u32_default; +// mod gather_elements_u32_axis1; +// mod gather_elements_u32_axis2; +// mod gather_elements_u32_axis3; +// 
mod sequence_length_fp16x16; +// mod sequence_length_fp16x16_broadcast; +// mod sequence_length_fp8x23; +// mod sequence_length_fp8x23_broadcast; +// mod sequence_length_i32; +// mod sequence_length_i32_broadcast; +// mod sequence_length_i8; +// mod sequence_length_i8_broadcast; +// mod sequence_length_u32; +// mod sequence_length_u32_broadcast; +// mod sequence_at_u32_positive; +// mod sequence_at_u32_negative; +// mod sequence_at_fp16x16_positive; +// mod sequence_at_fp16x16_negative; +// mod sequence_at_fp8x23_positive; +// mod sequence_at_fp8x23_negative; +// mod sequence_at_i32_positive; +// mod sequence_at_i32_negative; +// mod sequence_at_i8_positive; +// mod sequence_at_i8_negative; +// mod reduce_min_fp16x16_1D; +// mod reduce_min_fp16x16_2D_default; +// mod reduce_min_fp16x16_2D_keepdims; +// mod reduce_min_fp16x16_2D_axis_1; +// mod reduce_min_fp8x23_1D; +// mod reduce_min_fp8x23_2D_default; +// mod reduce_min_fp8x23_2D_keepdims; +// mod reduce_min_fp8x23_2D_axis_1; +// mod reduce_min_i32_1D; +// mod reduce_min_i32_2D_default; +// mod reduce_min_i32_2D_keepdims; +// mod reduce_min_i32_2D_axis_1; +// mod reduce_min_i8_1D; +// mod reduce_min_i8_2D_default; +// mod reduce_min_i8_2D_keepdims; +// mod reduce_min_i8_2D_axis_1; +// mod reduce_min_u32_1D; +// mod reduce_min_u32_2D_default; +// mod reduce_min_u32_2D_keepdims; +// mod reduce_min_u32_2D_axis_1; +// mod sequence_construct_fp16x16; +// mod sequence_construct_fp8x23; +// mod sequence_construct_i32; +// mod sequence_construct_i8; +// mod sequence_construct_u32; +// mod shrink_hard_fp16x16; +// mod shrink_soft_fp16x16; +// mod shrink_hard_fp8x23; +// mod shrink_soft_fp8x23; +// mod sequence_empty_fp16x16; +// mod sequence_empty_fp8x23; +// mod sequence_empty_i32; +// mod sequence_empty_i8; +// mod sequence_empty_u32; +// mod reduce_mean_fp16x16_1D; +// mod reduce_mean_fp16x16_2D_default; +// mod reduce_mean_fp16x16_2D_keepdims; +// mod reduce_mean_fp16x16_2D_axis_1; +// mod reduce_mean_fp8x23_1D; +// 
mod reduce_mean_fp8x23_2D_default; +// mod reduce_mean_fp8x23_2D_keepdims; +// mod reduce_mean_fp8x23_2D_axis_1; +// mod reduce_mean_i32_1D; +// mod reduce_mean_i32_2D_default; +// mod reduce_mean_i32_2D_keepdims; +// mod reduce_mean_i32_2D_axis_1; +// mod reduce_mean_i8_1D; +// mod reduce_mean_i8_2D_default; +// mod reduce_mean_i8_2D_keepdims; +// mod reduce_mean_i8_2D_axis_1; +// mod reduce_mean_u32_1D; +// mod reduce_mean_u32_2D_default; +// mod reduce_mean_u32_2D_keepdims; +// mod reduce_mean_u32_2D_axis_1; +// mod pow_fp16x16; +// mod pow_fp16x16_broadcast; +// mod pow_fp8x23; +// mod pow_fp8x23_broadcast; +// mod sequence_erase_u32_positive; +// mod sequence_erase_u32_negative; +// mod sequence_erase_u32_empty; +// mod sequence_erase_fp16x16_positive; +// mod sequence_erase_fp16x16_negative; +// mod sequence_erase_fp16x16_empty; +// mod sequence_erase_fp8x23_positive; +// mod sequence_erase_fp8x23_negative; +// mod sequence_erase_fp8x23_empty; +// mod sequence_erase_i32_positive; +// mod sequence_erase_i32_negative; +// mod sequence_erase_i32_empty; +// mod sequence_erase_i8_positive; +// mod sequence_erase_i8_negative; +// mod sequence_erase_i8_empty; +// mod sequence_insert_fp16x16; +// mod sequence_insert_fp8x23; +// mod sequence_insert_i32; +// mod sequence_insert_i8; +// mod sequence_insert_u32; +// mod concat_from_sequence_fp8x23_new_axis_zero; +// mod concat_from_sequence_fp8x23_new_axis_one; +// mod concat_from_sequence_fp8x23_new_axis_default; +// mod concat_from_sequence_fp16x16_new_axis_zero; +// mod concat_from_sequence_fp16x16_new_axis_one; +// mod concat_from_sequence_fp16x16_new_axis_default; +// mod concat_from_sequence_i32_new_axis_zero; +// mod concat_from_sequence_i32_new_axis_one; +// mod concat_from_sequence_i32_new_axis_default; +// mod concat_from_sequence_i8_new_axis_zero; +// mod concat_from_sequence_i8_new_axis_one; +// mod concat_from_sequence_i8_new_axis_default; +// mod concat_from_sequence_u32_new_axis_zero; +// mod 
concat_from_sequence_u32_new_axis_one; +// mod concat_from_sequence_u32_new_axis_default; +// mod is_nan_fp16x16; +// mod is_nan_fp8x23; +// mod is_inf_fp16x16; +// mod is_inf_fp8x23; +// mod is_inf_i32; +// mod is_inf_i8; +// mod is_inf_u32; +// mod is_pos_inf_fp16x16; +// mod is_neg_inf_fp16x16; +// mod is_pos_inf_fp8x23; +// mod is_neg_inf_fp8x23; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; +// mod is_pos_inf_i8; +// mod is_neg_inf_i8; +// mod reduce_log_sum_fp8x23_export_do_not_keepdims; +// mod reduce_log_sum_fp8x23_export_keepdims; +// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +// mod reduce_log_sum_fp16x16_export_do_not_keepdims; +// mod reduce_log_sum_fp16x16_export_keepdims; +// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +// mod and_bool; +// mod erf_fp16x16; +// mod erf_fp8x23; +// mod unique_fp16x16_without_axis_sorted; +// mod unique_fp16x16_with_axis_zero_sorted; +// mod unique_u32_without_axis_sorted; +// mod unique_u32_without_axis_not_sorted; +// mod unique_u32_with_axis_zero_sorted; +// mod unique_u32_with_axis_zero_not_sorted; +// mod unique_u32_with_axis_one_sorted; +// mod unique_u32_with_axis_one_not_sorted; +// mod gather_nd_fp16x16_3d_default; +// mod gather_nd_fp16x16_3d_batch_dims1; +// mod gather_nd_fp16x16_3d_batch_dims2; +// mod gather_nd_fp8x23_3d_default; +// mod gather_nd_fp8x23_3d_batch_dims1; +// mod gather_nd_fp8x23_3d_batch_dims2; +// mod gather_nd_i32_3d_default; +// mod gather_nd_i32_3d_batch_dims1; +// mod gather_nd_i32_3d_batch_dims2; +// mod gather_nd_i8_3d_default; +// mod gather_nd_i8_3d_batch_dims1; +// mod gather_nd_u32_default; +// mod gather_nd_u32_batch_dims1; +// mod gather_nd_u32_batch_dims2; +// mod resize_upsample_scales_nearest; +// mod resize_downsample_scales_cubic; +// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_downsample_scales_cubic_align_corners; +// mod resize_upsample_scales_linear; +// mod resize_downsample_scales_linear_align_corners; +// 
mod resize_downsample_scales_nearest; +// mod resize_upsample_scales_cubic; +// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_upsample_scales_cubic_align_corners; +// mod resize_upsample_scales_cubic_asymmetric; +// mod resize_upsample_scales_linear_align_corners; +// mod resize_upsample_sizes_nearest; +// mod resize_upsample_sizes_cubic; +// mod resize_downsample_sizes_cubic; +// mod resize_downsample_sizes_nearest; +// mod resize_upsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_scales_cubic_antialias; +// mod resize_downsample_scales_linear_antialias; +// mod resize_downsample_sizes_cubic_antialias; +// mod resize_downsample_sizes_linear_pytorch_half_pixel; +// mod resize_tf_crop_and_resize; +// mod resize_tf_crop_and_resize_extrapolation_value; +// mod resize_upsample_scales_nearest_axes_2_3; +// mod resize_upsample_scales_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_2_3; +// mod resize_upsample_sizes_nearest_ceil_half_pixel; +// mod resize_upsample_sizes_nearest_floor_align_corners; +// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +// mod resize_downsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_sizes_nearest_not_larger; +// mod resize_downsample_sizes_nearest_not_smaller; +// mod resize_tf_crop_and_resize_axes_2_3; +// mod resize_tf_crop_and_resize_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_not_larger; +// mod resize_upsample_sizes_nearest_not_smaller; +// mod compress_fp16x16_3d_default; +// mod compress_fp16x16_3d_axis1; +// mod compress_fp16x16_3d_axis2; +// mod compress_fp16x16_3d_axis3; +// mod compress_fp16x16_3d_noaxis; +// mod compress_fp8x23_3d_default; +// mod compress_fp8x23_3d_axis1; +// mod compress_fp8x23_3d_axis2; +// mod compress_i32_3d_default; +// mod compress_i32_3d_axis1; +// mod compress_i32_3d_axis2; +// mod compress_i8_3d_default; +// mod compress_i8_3d_axis1; +// mod 
compress_i8_3d_axis2; +// mod compress_u32_3d_default; +// mod compress_u32_3d_axis1; +// mod compress_u32_3d_axis2; +// mod compress_u32_3d_axis2_2; +// mod compress_u32_3d_axis3; +// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +// mod layer_normalization_default_axis; +// mod layer_normalization_4d_axis0; +// mod layer_normalization_4d_axis1; +// mod layer_normalization_4d_axis2; +// mod layer_normalization_4d_axis3; +// mod layer_normalization_3d_axis0_epsilon; +// mod layer_normalization_3d_axis_negative_3_epsilon; +// mod layer_normalization_3d_axis1_epsilon; +// mod layer_normalization_3d_axis2_epsilon; +// mod layer_normalization_4d_axis_negative_4; +// mod layer_normalization_4d_axis_negative_3; +// mod layer_normalization_4d_axis_negative_2; +// mod layer_normalization_4d_axis_negative_1; +// mod layer_normalization_3d_axis_negative_2_epsilon; +// mod layer_normalization_3d_axis_negative_1_epsilon; +// mod layer_normalization_test; +// mod split_u32_1d_equal_parts; +// mod split_u32_2d_equal_parts; +// mod split_u32_zero_size; +// mod split_u32_1d_variable_parts; +// mod split_u32_2d_variable_parts; +// mod split_u32_1d_uneven; +// mod split_u32_2d_uneven; +// mod split_fp16x16_1d_equal_parts; +// mod split_fp16x16_1d_variable_parts; +// mod split_fp16x16_2d_equal_parts; +// mod split_fp16x16_2d_variable_parts; +// mod split_fp16x16_zero_size; +// mod split_fp16x16_1d_uneven; +// mod split_fp16x16_2d_uneven; +// mod grid_sample; +// mod grid_sample_cubic; +// mod grid_sample_aligncorners; +// mod grid_sample_nearest; +// mod grid_sample_nearest_aligncorner; +// mod grid_sample_padding_border; +// mod grid_sample_padding_reflection; +// mod grid_sample_padding_zeros; +// mod col2im; +// mod col2im_5D; +// mod col2im_dilations; +// mod col2im_pads; +// mod col2im_strides; +// mod random_uniform_like_fp16x16; +// mod 
random_uniform_like_fp8x23; +// mod range_fp8x23; +// mod range_fp16x16; +// mod range_i32; +// mod range_i8; +// mod range_u32; +// mod hann_window_fp8x23; +// mod hann_window_fp16x16; +// mod hamming_window_fp16x16; +// mod hamming_window_fp8x23; +// mod blackman_window_fp16x16; +// mod blackman_window_fp8x23; +// mod split_to_sequence_fp16x16_1d_equal_parts; +// mod split_to_sequence_fp16x16_1d_variable_parts; +// mod split_to_sequence_fp16x16_2d_equal_parts; +// mod split_to_sequence_fp16x16_2d_variable_parts; +// mod split_to_sequence_fp16x16_zero_size; +// mod split_to_sequence_fp16x16_1d_uneven; +// mod split_to_sequence_fp16x16_2d_uneven; +// mod split_to_sequence_u32_1d_equal_parts; +// mod split_to_sequence_u32_1d_variable_parts; +// mod split_to_sequence_u32_2d_equal_parts; +// mod split_to_sequence_u32_2d_variable_parts; +// mod split_to_sequence_u32_zero_size; +// mod split_to_sequence_u32_1d_uneven; +// mod split_to_sequence_u32_2d_uneven; +// mod split_to_sequence_2d_scalar; +// mod split_to_sequence_2d_nokeepdims; +// mod split_to_sequence_1d_nokeepdims; +// mod reverse_sequence_fp16x16_batch_equal_parts; +// mod reverse_sequence_fp16x16_time_equal_parts; +// mod reverse_sequence_i32_batch_equal_parts; +// mod reverse_sequence_i32_time_equal_parts; +// mod reverse_sequence_i8_batch_equal_parts; +// mod reverse_sequence_i8_time_equal_parts; +// mod reverse_sequence_u32_4x4_batch; +// mod reverse_sequence_u32_4x4_time; +// mod reverse_sequence_u32_3x3_batch; +// mod reverse_sequence_u32_3x3_time; +// mod reverse_sequence_different_dimensions_4_5; +// mod reverse_sequence_different_dimensions_2_4; +// mod reverse_sequence_different_dimensions_1_6; +// mod reverse_sequence_different_dimensions_3x9_batch; +// mod reverse_sequence_different_dimensions_3x9_time; +// mod conv_transpose; +// mod conv_transpose_1d; +// mod conv_transpose_3d; +// mod conv_transpose_attributes; +// mod conv_transpose_autopad_same; +// mod conv_transpose_dilations; +// mod 
conv_transpose_pads; +// mod conv_transpose_group_2; +// mod conv_transpose_group_2_image_3; +// mod depth_to_space_fp16x16; +// mod depth_to_space_fp8x23; +// mod depth_to_space_i32; +// mod depth_to_space_i8; +// mod depth_to_space_u32; +// mod space_to_depth_fp16x16; +// mod space_to_depth_fp8x23; +// mod space_to_depth_i32; +// mod space_to_depth_i8; +// mod space_to_depth_u32; +// mod scatter_nd_fp16x16_3d_default; +// mod scatter_nd_fp16x16_3d_add; +// mod scatter_nd_fp16x16_3d_mul; +// mod scatter_nd_fp16x16_3d_max; +// mod scatter_nd_fp16x16_3d_min; +// mod scatter_nd_fp8x23_3d_default; +// mod scatter_nd_fp8x23_3d_add; +// mod scatter_nd_fp8x23_3d_mul; +// mod scatter_nd_fp8x23_3d_max; +// mod scatter_nd_fp8x23_3d_min; +// mod scatter_nd_u32_default; +// mod scatter_nd_u32_add; +// mod scatter_nd_u32_mul; +// mod scatter_nd_u32_max; +// mod scatter_nd_u32_min; +// mod conv_2D_with_padding; +// mod conv_1D_no_padding; +// mod conv_1D_with_padding; +// mod conv_3D_no_padding; +// mod conv_3D_with_padding; +// mod conv_4D_no_padding; +// mod conv_2D_with_2_groups; +// mod conv_2D_with_autopad_same; +// mod conv_2D_with_strides_asymmetric_padding; +// mod conv_2D_with_strides_with_padding; +// mod conv_4D_with_padding; +// mod label_encoder_fp16x16_3d_default; +// mod label_encoder_fp8x23_default; +// mod label_encoder_i8_default; +// mod label_encoder_i32_default; +// mod label_encoder_u32_default; +// mod reduce_sum_single_axis_fp16x16_1D; +// mod reduce_sum_single_axis_fp16x16_2D_default; +// mod reduce_sum_single_axis_fp16x16_2D_keepdims; +// mod reduce_sum_single_axis_fp16x16_2D_axis_1; +// mod reduce_sum_single_axis_fp8x23_1D; +// mod reduce_sum_single_axis_fp8x23_2D_default; +// mod reduce_sum_single_axis_fp8x23_2D_keepdims; +// mod reduce_sum_single_axis_fp8x23_2D_axis_1; +// mod reduce_sum_single_axis_i32_1D; +// mod reduce_sum_single_axis_i32_2D_default; +// mod reduce_sum_single_axis_i32_2D_keepdims; +// mod reduce_sum_single_axis_i32_2D_axis_1; +// 
mod reduce_sum_single_axis_i8_1D; +// mod reduce_sum_single_axis_i8_2D_default; +// mod reduce_sum_single_axis_i8_2D_keepdims; +// mod reduce_sum_single_axis_i8_2D_axis_1; +// mod reduce_sum_single_axis_u32_1D; +// mod reduce_sum_single_axis_u32_2D_default; +// mod reduce_sum_single_axis_u32_2D_keepdims; +// mod reduce_sum_single_axis_u32_2D_axis_1; +mod reduce_sum_keep_dims; diff --git a/tests/nodes/reduce_sum_keep_dims.cairo b/tests/nodes/reduce_sum_keep_dims.cairo new file mode 100644 index 000000000..c3baad2c0 --- /dev/null +++ b/tests/nodes/reduce_sum_keep_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_keep_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), true, false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_keep_dims/input_0.cairo b/tests/nodes/reduce_sum_keep_dims/input_0.cairo new file mode 100644 index 000000000..2de5818c3 --- /dev/null +++ b/tests/nodes/reduce_sum_keep_dims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff 
--git a/tests/nodes/reduce_sum_keep_dims/output_0.cairo b/tests/nodes/reduce_sum_keep_dims/output_0.cairo new file mode 100644 index 000000000..5326997d6 --- /dev/null +++ b/tests/nodes/reduce_sum_keep_dims/output_0.cairo @@ -0,0 +1,20 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(1); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(4); + data.append(6); + data.append(12); + data.append(14); + data.append(20); + data.append(22); + TensorTrait::new(shape.span(), data.span()) +} From 2087b465ed354740f698c251f60fd66ca5f1ab04 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 09:09:21 +0100 Subject: [PATCH 15/68] test reduce_sum --- nodegen/node/reduce_sum.py | 47 ++++++++++++++++++- tests/nodes.cairo | 3 ++ .../reduce_sum_default_axes_keepdims.cairo | 20 ++++++++ .../input_0.cairo | 26 ++++++++++ .../output_0.cairo | 15 ++++++ .../reduce_sum_empty_axes_input_noop.cairo | 20 ++++++++ .../input_0.cairo | 26 ++++++++++ .../output_0.cairo | 26 ++++++++++ tests/nodes/reduce_sum_keep_dims.cairo | 4 +- tests/nodes/reduce_sum_no_keep_dims.cairo | 20 ++++++++ .../reduce_sum_no_keep_dims/input_0.cairo | 26 ++++++++++ .../reduce_sum_no_keep_dims/output_0.cairo | 19 ++++++++ 12 files changed, 249 insertions(+), 3 deletions(-) create mode 100644 tests/nodes/reduce_sum_default_axes_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_sum_empty_axes_input_noop.cairo create mode 100644 tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo create mode 100644 tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo create mode 100644 
tests/nodes/reduce_sum_no_keep_dims.cairo create mode 100644 tests/nodes/reduce_sum_no_keep_dims/input_0.cairo create mode 100644 tests/nodes/reduce_sum_no_keep_dims/output_0.cairo diff --git a/nodegen/node/reduce_sum.py b/nodegen/node/reduce_sum.py index 4f7a213c8..53b2ed824 100644 --- a/nodegen/node/reduce_sum.py +++ b/nodegen/node/reduce_sum.py @@ -4,6 +4,23 @@ class Reduce_sum(RunAll): + + @staticmethod + def reduce_sum_no_keep_dims(): + axes = np.array([1], dtype=np.uint32) + keepdims = 0 + + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_no_keep_dims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None)", name) + @staticmethod def reduce_sum_keep_dims(): axes = np.array([1], dtype=np.uint32) @@ -18,4 +35,32 @@ def reduce_sum_keep_dims(): name = "reduce_sum_keep_dims" make_test( - [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), true, false)", name) + [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None)", name) + + @staticmethod + def reduce_sum_default_axes_keepdims(): + keepdims = 1 + + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=None, keepdims=keepdims == 1) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_default_axes_keepdims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None)", name) + + @staticmethod + def reduce_sum_empty_axes_input_noop(): + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.array(x) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, 
y.shape, y.flatten()) + + name = "reduce_sum_empty_axes_input_noop" + make_test( + [x], y, "input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true))", name) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 044698c62..f3a02beb1 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1048,3 +1048,6 @@ // mod reduce_sum_single_axis_u32_2D_keepdims; // mod reduce_sum_single_axis_u32_2D_axis_1; mod reduce_sum_keep_dims; +mod reduce_sum_no_keep_dims; +mod reduce_sum_default_axes_keepdims; +mod reduce_sum_empty_axes_input_noop; diff --git a/tests/nodes/reduce_sum_default_axes_keepdims.cairo b/tests/nodes/reduce_sum_default_axes_keepdims.cairo new file mode 100644 index 000000000..483b37d20 --- /dev/null +++ b/tests/nodes/reduce_sum_default_axes_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_default_axes_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo b/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo new file mode 100644 index 000000000..2de5818c3 --- /dev/null +++ b/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = 
ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo b/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo new file mode 100644 index 000000000..6cc93d6f7 --- /dev/null +++ b/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(78); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop.cairo new file mode 100644 index 000000000..973479855 --- /dev/null +++ b/tests/nodes/reduce_sum_empty_axes_input_noop.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_empty_axes_input_noop() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo new file mode 100644 index 000000000..2de5818c3 --- /dev/null +++ 
b/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo new file mode 100644 index 000000000..d679605a0 --- /dev/null +++ b/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_keep_dims.cairo b/tests/nodes/reduce_sum_keep_dims.cairo index c3baad2c0..661d3711f 100644 --- a/tests/nodes/reduce_sum_keep_dims.cairo +++ b/tests/nodes/reduce_sum_keep_dims.cairo @@ -4,8 +4,8 @@ mod output_0; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; use core::array::{ArrayTrait, SpanTrait}; #[test] @@ -14,7 +14,7 @@ fn test_reduce_sum_keep_dims() { let input_0 = input_0::input_0(); let z_0 = output_0::output_0(); - let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), true, false); + let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None); assert_eq(y_0, z_0); } diff --git a/tests/nodes/reduce_sum_no_keep_dims.cairo b/tests/nodes/reduce_sum_no_keep_dims.cairo new file mode 100644 index 000000000..a83405f01 --- /dev/null +++ b/tests/nodes/reduce_sum_no_keep_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_no_keep_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo b/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo new file mode 100644 index 000000000..2de5818c3 --- /dev/null +++ b/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); 
+ data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo b/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo new file mode 100644 index 000000000..72c71a185 --- /dev/null +++ b/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(4); + data.append(6); + data.append(12); + data.append(14); + data.append(20); + data.append(22); + TensorTrait::new(shape.span(), data.span()) +} From 8578013683e67b1e7667fb2e096a4497a9ed23a3 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 09:16:56 +0100 Subject: [PATCH 16/68] update doc --- docs/framework/operators/tensor/README.md | 3 +- .../operators/tensor/tensor.reduce_sum.md | 13 ++++--- .../tensor/tensor.reduce_sum_single_axis.md | 39 +++++++++++++++++++ src/operators/tensor/core.cairo | 26 +++++++------ 4 files changed, 62 insertions(+), 19 deletions(-) create mode 100644 docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md diff --git a/docs/framework/operators/tensor/README.md b/docs/framework/operators/tensor/README.md index fe2995096..51c1d6d06 100644 --- a/docs/framework/operators/tensor/README.md +++ b/docs/framework/operators/tensor/README.md @@ -55,7 +55,8 @@ use orion::operators::tensor::TensorTrait; | [`tensor.min_in_tensor`](tensor.min\_in\_tensor.md) | Returns the minimum value in the tensor. | | [`tensor.min`](tensor.min.md) | Returns the minimum value in the tensor. | | [`tensor.max`](tensor.max.md) | Returns the maximum value in the tensor. 
| -| [`tensor.reduce_sum`](tensor.reduce\_sum.md) | Reduces a tensor by summing its elements along a specified axis. | +| [`tensor.reduce_sum`](tensor.reduce\_sum.md) | Computes the sum of the input tensor's elements along the provided axes. | +| [`tensor.reduce_sum_single_axis`](tensor.reduce\_sum\_single\_axis.md) | Reduces a tensor by summing its elements along a specified axis. | | [`tensor.reduce_prod`](tensor.reduce\_prod.md) | Reduces a tensor to its products along specified axis. | | [`tensor.argmax`](tensor.argmax.md) | Returns the index of the maximum value along the specified axis. | | [`tensor.argmin`](tensor.argmin.md) | Returns the index of the minimum value along the specified axis. | diff --git a/docs/framework/operators/tensor/tensor.reduce_sum.md b/docs/framework/operators/tensor/tensor.reduce_sum.md index 3aa77d2ce..52b49d137 100644 --- a/docs/framework/operators/tensor/tensor.reduce_sum.md +++ b/docs/framework/operators/tensor/tensor.reduce_sum.md @@ -4,13 +4,14 @@ fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; ``` -Reduces a tensor by summing its elements along a specified axis. +Computes the sum of the input tensor's elements along the provided axes ## Args * `self`(`@Tensor`) - The input tensor. -* `axis`(`usize`) - The dimension to reduce. -* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. +* `axes`(`Option>`) - Optional input list of integers, along which to reduce. +* `keepdims`(`Option`) - If true, retains reduced dimensions with length 1. +* `noop_with_empty_axes`(`Option`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. 
## Panics @@ -29,11 +30,11 @@ use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; fn reduce_sum_example() -> Tensor { let tensor = TensorTrait::::new( - shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), + shape: array![3, 2, 2].span(), data: array![1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12].span(), ); // We can call `reduce_sum` function as follows. - return tensor.reduce_sum(axis: 0, keepdims: false); + return tensor.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); } ->>> [[4,6],[8,10]] +>>> [[4, 6] [12, 14] [20, 22]] ``` diff --git a/docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md b/docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md new file mode 100644 index 000000000..3eab60dfa --- /dev/null +++ b/docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md @@ -0,0 +1,39 @@ +## tensor.reduce_sum_single_axis + +```rust + fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; +``` + +Reduces a tensor by summing its elements along a specified axis. + +## Args + +* `self`(`@Tensor`) - The input tensor. +* `axis`(`usize`) - The dimension to reduce. +* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. + +## Panics + +* Panics if axis is not in the range of the input tensor's dimensions. + +## Returns + +A new `Tensor` instance with the specified axis reduced by summing its elements. + +## Examples + +```rust +use core::array::{ArrayTrait, SpanTrait}; + +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; + +fn reduce_sum_single_axis_example() -> Tensor { + let tensor = TensorTrait::::new( + shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), + ); + + // We can call `reduce_sum_single_axis` function as follows. 
+ return tensor.reduce_sum_single_axis(axis: 0, keepdims: false); +} +>>> [[4,6],[8,10]] +``` diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index cb140bd70..c2b9668b1 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -53,7 +53,8 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ``` /// - /// Reduces a tensor by summing its elements along a specified axis. + /// Computes the sum of the input tensor's elements along the provided axes /// /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `axis`(`usize`) - The dimension to reduce. - /// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. + /// * `axes`(`Option>`) - Optional input list of integers, along which to reduce. + /// * `keepdims`(`Option`) - If true, retains reduced dimensions with length 1. + /// * `noop_with_empty_axes`(`Option`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. /// /// ## Panics /// @@ -666,13 +668,13 @@ trait TensorTrait { /// /// fn reduce_sum_example() -> Tensor { /// let tensor = TensorTrait::::new( - /// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), + /// shape: array![3, 2, 2].span(), data: array![1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12].span(), /// ); /// /// // We can call `reduce_sum` function as follows. 
- /// return tensor.reduce_sum(axis: 0, keepdims: false); + /// return tensor.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); /// } - /// >>> [[4,6],[8,10]] + /// >>> [[4, 6] [12, 14] [20, 22]] /// ``` /// fn reduce_sum( @@ -681,10 +683,10 @@ trait TensorTrait { keepdims: Option, noop_with_empty_axes: Option ) -> Tensor; - /// ## tensor.reduce_sum + /// ## tensor.reduce_sum_single_axis /// /// ```rust - /// fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ``` /// /// Reduces a tensor by summing its elements along a specified axis. @@ -710,13 +712,13 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn reduce_sum_example() -> Tensor { + /// fn reduce_sum_single_axis_example() -> Tensor { /// let tensor = TensorTrait::::new( /// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), /// ); /// - /// // We can call `reduce_sum` function as follows. - /// return tensor.reduce_sum(axis: 0, keepdims: false); + /// // We can call `reduce_sum_single_axis` function as follows. 
+ /// return tensor.reduce_sum_single_axis(axis: 0, keepdims: false); /// } /// >>> [[4,6],[8,10]] /// ``` From 3248afc71da8bbf6645c28d033ef23412d4c9606 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 09:27:07 +0100 Subject: [PATCH 17/68] restore tests --- tests/lib.cairo | 10 +- tests/nodes.cairo | 2098 ++++++++++++++++++++++----------------------- 2 files changed, 1054 insertions(+), 1054 deletions(-) diff --git a/tests/lib.cairo b/tests/lib.cairo index a61287d92..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -// mod numbers; -// mod performance; -// mod tensor_core; +mod numbers; +mod performance; +mod tensor_core; mod nodes; -// mod ml; -// mod operators; +mod ml; +mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index f3a02beb1..ff888973b 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1052 +1,1052 @@ -// mod abs_fp16x16; -// mod abs_fp8x23; -// mod abs_i32; -// mod abs_i8; -// mod acos_fp16x16; -// mod acos_fp8x23; -// mod acosh_fp16x16; -// mod acosh_fp8x23; -// mod add_fp16x16; -// mod add_fp16x16_broadcast; -// mod add_fp8x23; -// mod add_fp8x23_broadcast; -// mod add_i32; -// mod add_i32_broadcast; -// mod add_i8; -// mod add_i8_broadcast; -// mod add_u32; -// mod add_u32_broadcast; -// mod argmax_fp16x16_1D_default; -// mod argmax_fp16x16_1D_keepdims_false; -// mod argmax_fp16x16_1D_last_index; -// mod argmax_fp16x16_2D_default; -// mod argmax_fp16x16_2D_keepdims_false; -// mod argmax_fp16x16_2D_last_index; -// mod argmax_fp16x16_3D_default; -// mod argmax_fp16x16_3D_keepdims_false; -// mod argmax_fp16x16_3D_last_index; -// mod argmax_fp8x23_1D_default; -// mod argmax_fp8x23_1D_keepdims_false; -// mod argmax_fp8x23_1D_last_index; -// mod argmax_fp8x23_2D_default; -// mod argmax_fp8x23_2D_keepdims_false; -// mod argmax_fp8x23_2D_last_index; -// mod argmax_fp8x23_3D_default; -// mod argmax_fp8x23_3D_keepdims_false; -// mod argmax_fp8x23_3D_last_index; -// mod 
argmax_i32_1D_default; -// mod argmax_i32_1D_keepdims_false; -// mod argmax_i32_1D_last_index; -// mod argmax_i32_2D_default; -// mod argmax_i32_2D_keepdims_false; -// mod argmax_i32_2D_last_index; -// mod argmax_i32_3D_default; -// mod argmax_i32_3D_keepdims_false; -// mod argmax_i32_3D_last_index; -// mod argmax_i8_1D_default; -// mod argmax_i8_1D_keepdims_false; -// mod argmax_i8_1D_last_index; -// mod argmax_i8_2D_default; -// mod argmax_i8_2D_keepdims_false; -// mod argmax_i8_2D_last_index; -// mod argmax_i8_3D_default; -// mod argmax_i8_3D_keepdims_false; -// mod argmax_i8_3D_last_index; -// mod argmax_u32_1D_default; -// mod argmax_u32_1D_keepdims_false; -// mod argmax_u32_1D_last_index; -// mod argmax_u32_2D_default; -// mod argmax_u32_2D_keepdims_false; -// mod argmax_u32_2D_last_index; -// mod argmax_u32_3D_default; -// mod argmax_u32_3D_keepdims_false; -// mod argmax_u32_3D_last_index; -// mod argmin_fp16x16_1D_default; -// mod argmin_fp16x16_1D_keepdims_false; -// mod argmin_fp16x16_1D_last_index; -// mod argmin_fp16x16_2D_default; -// mod argmin_fp16x16_2D_keepdims_false; -// mod argmin_fp16x16_2D_last_index; -// mod argmin_fp16x16_3D_default; -// mod argmin_fp16x16_3D_keepdims_false; -// mod argmin_fp16x16_3D_last_index; -// mod argmin_fp8x23_1D_default; -// mod argmin_fp8x23_1D_keepdims_false; -// mod argmin_fp8x23_1D_last_index; -// mod argmin_fp8x23_2D_default; -// mod argmin_fp8x23_2D_keepdims_false; -// mod argmin_fp8x23_2D_last_index; -// mod argmin_fp8x23_3D_default; -// mod argmin_fp8x23_3D_keepdims_false; -// mod argmin_fp8x23_3D_last_index; -// mod argmin_i32_1D_default; -// mod argmin_i32_1D_keepdims_false; -// mod argmin_i32_1D_last_index; -// mod argmin_i32_2D_default; -// mod argmin_i32_2D_keepdims_false; -// mod argmin_i32_2D_last_index; -// mod argmin_i32_3D_default; -// mod argmin_i32_3D_keepdims_false; -// mod argmin_i32_3D_last_index; -// mod argmin_i8_1D_default; -// mod argmin_i8_1D_keepdims_false; -// mod argmin_i8_1D_last_index; 
-// mod argmin_i8_2D_default; -// mod argmin_i8_2D_keepdims_false; -// mod argmin_i8_2D_last_index; -// mod argmin_i8_3D_default; -// mod argmin_i8_3D_keepdims_false; -// mod argmin_i8_3D_last_index; -// mod argmin_u32_1D_default; -// mod argmin_u32_1D_keepdims_false; -// mod argmin_u32_1D_last_index; -// mod argmin_u32_2D_default; -// mod argmin_u32_2D_keepdims_false; -// mod argmin_u32_2D_last_index; -// mod argmin_u32_3D_default; -// mod argmin_u32_3D_keepdims_false; -// mod argmin_u32_3D_last_index; -// mod asin_fp16x16; -// mod asin_fp8x23; -// mod asinh_fp16x16; -// mod asinh_fp8x23; -// mod atan_fp16x16; -// mod atan_fp8x23; -// mod ceil_fp16x16; -// mod ceil_fp8x23; -// mod concat_fp16x16_1d; -// mod concat_fp16x16_2d; -// mod concat_fp16x16_3d_default; -// mod concat_fp16x16_3d_axis_1; -// mod concat_fp16x16_3d_axis_2; -// mod concat_fp16x16_3d_three_tensors_axis_1; -// mod concat_fp16x16_3d_three_tensors_axis_2; -// mod concat_fp8x23_1d; -// mod concat_fp8x23_2d; -// mod concat_fp8x23_3d_default; -// mod concat_fp8x23_3d_axis_1; -// mod concat_fp8x23_3d_axis_2; -// mod concat_fp8x23_3d_three_tensors_axis_1; -// mod concat_fp8x23_3d_three_tensors_axis_2; -// mod concat_i32_1d; -// mod concat_i32_2d; -// mod concat_i32_3d_default; -// mod concat_i32_3d_axis_1; -// mod concat_i32_3d_axis_2; -// mod concat_i32_3d_three_tensors_axis_1; -// mod concat_i32_3d_three_tensors_axis_2; -// mod concat_i8_1d; -// mod concat_i8_2d; -// mod concat_i8_3d_default; -// mod concat_i8_3d_axis_1; -// mod concat_i8_3d_axis_2; -// mod concat_i8_3d_three_tensors_axis_1; -// mod concat_i8_3d_three_tensors_axis_2; -// mod concat_u32_1d; -// mod concat_u32_2d; -// mod concat_u32_3d_default; -// mod concat_u32_3d_axis_1; -// mod concat_u32_3d_axis_2; -// mod concat_u32_3d_three_tensors_axis_1; -// mod concat_u32_3d_three_tensors_axis_2; -// mod cos_fp16x16; -// mod cos_fp8x23; -// mod cosh_fp16x16; -// mod cosh_fp8x23; -// mod cumsum_fp16x16_1d_default; -// mod 
cumsum_fp16x16_1d_exclusive; -// mod cumsum_fp16x16_1d_reverse; -// mod cumsum_fp16x16_1d_reverse_exclusive; -// mod cumsum_fp16x16_2d_axis_0; -// mod cumsum_fp16x16_2d_axis_1; -// mod cumsum_fp8x23_1d_default; -// mod cumsum_fp8x23_1d_exclusive; -// mod cumsum_fp8x23_1d_reverse; -// mod cumsum_fp8x23_1d_reverse_exclusive; -// mod cumsum_fp8x23_2d_axis_0; -// mod cumsum_fp8x23_2d_axis_1; -// mod cumsum_i32_1d_default; -// mod cumsum_i32_1d_exclusive; -// mod cumsum_i32_1d_reverse; -// mod cumsum_i32_1d_reverse_exclusive; -// mod cumsum_i32_2d_axis_0; -// mod cumsum_i32_2d_axis_1; -// mod cumsum_i8_1d_default; -// mod cumsum_i8_1d_exclusive; -// mod cumsum_i8_1d_reverse; -// mod cumsum_i8_1d_reverse_exclusive; -// mod cumsum_i8_2d_axis_0; -// mod cumsum_i8_2d_axis_1; -// mod cumsum_u32_1d_default; -// mod cumsum_u32_1d_exclusive; -// mod cumsum_u32_1d_reverse; -// mod cumsum_u32_1d_reverse_exclusive; -// mod cumsum_u32_2d_axis_0; -// mod cumsum_u32_2d_axis_1; -// mod div_fp16x16; -// mod div_fp16x16_broadcast; -// mod div_fp8x23; -// mod div_fp8x23_broadcast; -// mod div_i32; -// mod div_i32_broadcast; -// mod div_i8; -// mod div_i8_broadcast; -// mod div_u32; -// mod div_u32_broadcast; -// mod equal_fp16x16; -// mod equal_fp16x16_broadcast; -// mod equal_fp8x23; -// mod equal_fp8x23_broadcast; -// mod equal_i32; -// mod equal_i32_broadcast; -// mod equal_i8; -// mod equal_i8_broadcast; -// mod equal_u32; -// mod equal_u32_broadcast; -// mod exp_fp16x16; -// mod exp_fp8x23; -// mod less_equal_fp16x16; -// mod less_equal_fp16x16_broadcast; -// mod less_equal_fp8x23; -// mod less_equal_fp8x23_broadcast; -// mod less_equal_i32; -// mod less_equal_i32_broadcast; -// mod less_equal_i8; -// mod less_equal_i8_broadcast; -// mod less_equal_u32; -// mod less_equal_u32_broadcast; -// mod greater_fp16x16; -// mod greater_fp16x16_broadcast; -// mod greater_fp8x23; -// mod greater_fp8x23_broadcast; -// mod greater_i32; -// mod greater_i32_broadcast; -// mod greater_i8; -// mod 
greater_i8_broadcast; -// mod greater_u32; -// mod greater_u32_broadcast; -// mod leaky_relu_fp16x16; -// mod leaky_relu_fp8x23; -// mod linear_fp16x16; -// mod linear_fp8x23; -// mod linear_i32; -// mod linear_i8; -// mod linear_u32; -// mod log_fp16x16; -// mod log_fp8x23; -// mod logsoftmax_fp16x16_axis_0; -// mod logsoftmax_fp16x16_axis_1; -// mod logsoftmax_fp8x23_axis_0; -// mod logsoftmax_fp8x23_axis_1; -// mod matmul_fp16x16_1d; -// mod matmul_fp16x16_2x2; -// mod matmul_fp16x16_2x1; -// mod matmul_fp16x16_1x2; -// mod matmul_fp8x23_1d; -// mod matmul_fp8x23_2x2; -// mod matmul_fp8x23_2x1; -// mod matmul_fp8x23_1x2; -// mod matmul_i32_1d; -// mod matmul_i32_2x2; -// mod matmul_i32_2x1; -// mod matmul_i32_1x2; -// mod matmul_i8_1d; -// mod matmul_i8_2x2; -// mod matmul_i8_2x1; -// mod matmul_i8_1x2; -// mod matmul_u32_1d; -// mod matmul_u32_2x2; -// mod matmul_u32_2x1; -// mod matmul_u32_1x2; -// mod mul_fp16x16; -// mod mul_fp16x16_broadcast; -// mod mul_fp8x23; -// mod mul_fp8x23_broadcast; -// mod mul_i32; -// mod mul_i32_broadcast; -// mod mul_i8; -// mod mul_i8_broadcast; -// mod mul_u32; -// mod mul_u32_broadcast; -// mod or_fp16x16; -// mod or_fp16x16_broadcast; -// mod or_fp8x23; -// mod or_fp8x23_broadcast; -// mod or_i32; -// mod or_i32_broadcast; -// mod or_i8; -// mod or_i8_broadcast; -// mod or_u32; -// mod or_u32_broadcast; -// mod relu_fp16x16; -// mod relu_fp8x23; -// mod relu_i32; -// mod relu_i8; -// mod sigmoid_fp16x16; -// mod sigmoid_fp8x23; -// mod sin_fp16x16; -// mod sin_fp8x23; -// mod sinh_fp16x16; -// mod sinh_fp8x23; -// mod softmax_fp16x16; -// mod softmax_fp8x23; -// mod softplus_fp8x23; -// mod softplus_fp16x16; -// mod softsign_fp8x23; -// mod softsign_fp16x16; -// mod sqrt_fp16x16; -// mod sqrt_fp8x23; -// mod sub_fp16x16; -// mod sub_fp16x16_broadcast; -// mod sub_fp8x23; -// mod sub_fp8x23_broadcast; -// mod sub_i32; -// mod sub_i32_broadcast; -// mod sub_i8; -// mod sub_i8_broadcast; -// mod sub_u32; -// mod 
sub_u32_broadcast; -// mod tanh_fp16x16; -// mod tanh_fp8x23; -// mod transpose_fp16x16_2d; -// mod transpose_fp16x16_3d; -// mod transpose_fp8x23_2d; -// mod transpose_fp8x23_3d; -// mod transpose_i32_2d; -// mod transpose_i32_3d; -// mod transpose_i8_2d; -// mod transpose_i8_3d; -// mod transpose_u32_2d; -// mod transpose_u32_3d; -// mod xor_fp16x16; -// mod xor_fp16x16_broadcast; -// mod xor_fp8x23; -// mod xor_fp8x23_broadcast; -// mod xor_i32; -// mod xor_i32_broadcast; -// mod xor_i8; -// mod xor_i8_broadcast; -// mod xor_u32; -// mod xor_u32_broadcast; -// mod less_fp16x16; -// mod less_fp16x16_broadcast; -// mod less_fp8x23; -// mod less_fp8x23_broadcast; -// mod less_i32; -// mod less_i32_broadcast; -// mod less_i8; -// mod less_i8_broadcast; -// mod less_u32; -// mod less_u32_broadcast; -// mod greater_equal_fp16x16; -// mod greater_equal_fp16x16_broadcast; -// mod greater_equal_fp8x23; -// mod greater_equal_fp8x23_broadcast; -// mod greater_equal_i32; -// mod greater_equal_i32_broadcast; -// mod greater_equal_i8; -// mod greater_equal_i8_broadcast; -// mod greater_equal_u32; -// mod greater_equal_u32_broadcast; -// mod slice_fp16x16_2d; -// mod slice_fp16x16_3d; -// mod slice_fp8x23_2d; -// mod slice_fp8x23_3d; -// mod slice_i32_2d; -// mod slice_i32_3d; -// mod slice_i8_2d; -// mod slice_i8_3d; -// mod slice_u32_2d; -// mod slice_u32_3d; -// mod gather_fp8x23_3d_default; -// mod gather_fp8x23_3d_axis1; -// mod gather_fp8x23_3d_axis2; -// mod gather_fp16x16_3d_default; -// mod gather_fp16x16_3d_axis1; -// mod gather_fp16x16_3d_axis2; -// mod gather_i8_3d_default; -// mod gather_i8_3d_axis1; -// mod gather_i8_3d_axis2; -// mod gather_i32_3d_default; -// mod gather_i32_3d_axis1; -// mod gather_i32_3d_axis2; -// mod gather_u32_3d_default; -// mod gather_u32_3d_axis1; -// mod gather_u32_3d_axis2; -// mod nonzero_fp16x16_2d; -// mod nonzero_fp16x16_3d; -// mod nonzero_fp8x23_2d; -// mod nonzero_fp8x23_3d; -// mod nonzero_i32_2d; -// mod nonzero_i32_3d; -// 
mod nonzero_i8_2d; -// mod nonzero_i8_3d; -// mod nonzero_u32_2d; -// mod nonzero_u32_3d; -// mod squeeze_fP16x16; -// mod squeeze_fP8x23; -// mod squeeze_i32; -// mod squeeze_i8; -// mod squeeze_u32; -// mod unsqueeze_fp16x16_2d; -// mod unsqueeze_fp16x16_3d; -// mod unsqueeze_fp8x23_2d; -// mod unsqueeze_fp8x23_3d; -// mod unsqueeze_i32_2d; -// mod unsqueeze_i32_3d; -// mod unsqueeze_i8_2d; -// mod unsqueeze_i8_3d; -// mod unsqueeze_u32_2d; -// mod unsqueeze_u32_3d; -// mod sign_fP16x16; -// mod sign_fP8x23; -// mod sign_fail; -// mod sign_i32; -// mod sign_i8; -// mod clip_fp16x16_2d; -// mod clip_fp16x16_3d; -// mod clip_fp8x23_2d; -// mod clip_fp8x23_3d; -// mod clip_i32_2d; -// mod clip_i32_3d; -// mod clip_i8_2d; -// mod clip_i8_3d; -// mod clip_u32_2d; -// mod clip_u32_3d; -// mod identity_fP16x16; -// mod identity_fP8x23; -// mod identity_i32; -// mod identity_i8; -// mod identity_u32; -// mod thresholded_relu_fp16x16; -// mod thresholded_relu_fp8x23; -// mod hard_sigmoid_fp8x23; -// mod hard_sigmoid_fp16x16; -// mod neg_fp16x16; -// mod neg_fp8x23; -// mod neg_i32; -// mod neg_i8; -// mod gemm_all_attributes; -// mod gemm_alpha; -// mod gemm_beta; -// mod gemm_default_matrix_bias; -// mod gemm_default_vector_bias; -// mod gemm_default_no_bias; -// mod gemm_transposeA; -// mod gemm_transposeB; -// mod min_fp16x16_three_tensors; -// mod min_fp16x16_broadcast_three_tensors; -// mod min_fp16x16_two_tensors; -// mod min_fp16x16_broadcast_two_tensors; -// mod min_fp8x23_three_tensors; -// mod min_fp8x23_broadcast_three_tensors; -// mod min_fp8x23_two_tensors; -// mod min_fp8x23_broadcast_two_tensors; -// mod min_i32_three_tensors; -// mod min_i32_broadcast_three_tensors; -// mod min_i32_two_tensors; -// mod min_i32_broadcast_two_tensors; -// mod min_i8_three_tensors; -// mod min_i8_broadcast_three_tensors; -// mod min_i8_two_tensors; -// mod min_i8_broadcast_two_tensors; -// mod min_u32_three_tensors; -// mod min_u32_broadcast_three_tensors; -// mod 
min_u32_two_tensors; -// mod min_u32_broadcast_two_tensors; -// mod where_fp16x16; -// mod where_fp16x16_broadcast; -// mod where_fp8x23; -// mod where_fp8x23_broadcast; -// mod where_i32; -// mod where_i32_broadcast; -// mod where_i8; -// mod where_i8_broadcast; -// mod where_u32; -// mod where_u32_broadcast; -// mod not_bool; -// mod round_fp16x16; -// mod round_fp8x23; -// mod max_fp16x16_three_tensors; -// mod max_fp16x16_broadcast_three_tensors; -// mod max_fp16x16_two_tensors; -// mod max_fp16x16_broadcast_two_tensors; -// mod max_fp8x23_three_tensors; -// mod max_fp8x23_broadcast_three_tensors; -// mod max_fp8x23_two_tensors; -// mod max_fp8x23_broadcast_two_tensors; -// mod max_i32_three_tensors; -// mod max_i32_broadcast_three_tensors; -// mod max_i32_two_tensors; -// mod max_i32_broadcast_two_tensors; -// mod max_i8_three_tensors; -// mod max_i8_broadcast_three_tensors; -// mod max_i8_two_tensors; -// mod max_i8_broadcast_two_tensors; -// mod max_u32_three_tensors; -// mod max_u32_broadcast_three_tensors; -// mod max_u32_two_tensors; -// mod max_u32_broadcast_two_tensors; -// mod scatter_fp16x16_3d_default; -// mod scatter_fp16x16_3d_axis1; -// mod scatter_fp16x16_3d_axis1_add; -// mod scatter_fp8x23_default; -// mod scatter_fp8x23_axis1; -// mod scatter_fp8x23_mul; -// mod scatter_i8_default; -// mod scatter_i8_axis1; -// mod scatter_i8_axis1_max; -// mod scatter_u32_default; -// mod scatter_u32_axis1; -// mod scatter_u32_add; -// mod array_feature_extractor_1D_i32; -// mod array_feature_extractor_1D_fp8x23; -// mod array_feature_extractor_1D_fp16x16; -// mod array_feature_extractor_2D_i32; -// mod array_feature_extractor_2D_fp8x23; -// mod array_feature_extractor_2D_fp16x16; -// mod array_feature_extractor_3D_i32; -// mod array_feature_extractor_3D_fp8x23; -// mod array_feature_extractor_3D_fp16x16; -// mod binarizer_fp16x16; -// mod binarizer_fp8x23; -// mod tril_fp16x16; -// mod tril_fp16x16_neg; -// mod tril_fp16x16_one_row; -// mod 
tril_fp16x16_out_neg; -// mod tril_fp16x16_out_pos; -// mod tril_fp16x16_pos; -// mod tril_fp16x16_square; -// mod tril_fp16x16_square_neg; -// mod tril_fp16x16_zero; -// mod triu_fp16x16; -// mod triu_fp16x16_neg; -// mod triu_fp16x16_one_row; -// mod triu_fp16x16_out_neg; -// mod triu_fp16x16_out_pos; -// mod triu_fp16x16_pos; -// mod triu_fp16x16_square; -// mod triu_fp16x16_square_neg; -// mod triu_fp16x16_zero; -// mod tril_fp8x23; -// mod tril_fp8x23_neg; -// mod tril_fp8x23_one_row; -// mod tril_fp8x23_out_neg; -// mod tril_fp8x23_out_pos; -// mod tril_fp8x23_pos; -// mod tril_fp8x23_square; -// mod tril_fp8x23_square_neg; -// mod tril_fp8x23_zero; -// mod triu_fp8x23; -// mod triu_fp8x23_neg; -// mod triu_fp8x23_one_row; -// mod triu_fp8x23_out_neg; -// mod triu_fp8x23_out_pos; -// mod triu_fp8x23_pos; -// mod triu_fp8x23_square; -// mod triu_fp8x23_square_neg; -// mod triu_fp8x23_zero; -// mod tril_i32; -// mod tril_neg_i32; -// mod tril_i32_one_row; -// mod tril_i32_out_neg; -// mod tril_i32_out_pos; -// mod tril_i32_pos; -// mod tril_i32_square; -// mod tril_i32_square_neg; -// mod tril_i32_zero; -// mod triu_i32; -// mod triu_i32_neg; -// mod triu_i32_one_row; -// mod triu_i32_out_neg; -// mod triu_i32_out_pos; -// mod triu_i32_pos; -// mod triu_i32_square; -// mod triu_i32_square_neg; -// mod triu_i32_zero; -// mod tril_i8; -// mod tril_i8_neg; -// mod tril_i8_one_row; -// mod tril_i8_out_neg; -// mod tril_i8_out_pos; -// mod tril_i8_pos; -// mod tril_i8_square; -// mod tril_i8_square_neg; -// mod tril_i8_zero; -// mod triu_i8; -// mod triu_i8_neg; -// mod triu_i8_one_row; -// mod triu_i8_out_neg; -// mod triu_i8_out_pos; -// mod triu_i8_pos; -// mod triu_i8_square; -// mod triu_i8_square_neg; -// mod triu_i8_zero; -// mod tril_u32; -// mod tril_u32_neg; -// mod tril_u32_one_row; -// mod tril_u32_out_neg; -// mod tril_u32_out_pos; -// mod tril_u32_pos; -// mod tril_u32_square; -// mod tril_u32_square_neg; -// mod tril_u32_zero; -// mod triu_u32; -// 
mod triu_u32_neg; -// mod triu_u32_one_row; -// mod triu_u32_out_neg; -// mod triu_u32_out_pos; -// mod triu_u32_pos; -// mod triu_u32_square; -// mod triu_u32_square_neg; -// mod triu_u32_zero; -// mod reduce_sum_square_fp16x16_export_do_not_keepdims; -// mod reduce_sum_square_fp16x16_export_keepdims; -// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -// mod reduce_sum_square_fp8x23_export_do_not_keepdims; -// mod reduce_sum_square_fp8x23_export_keepdims; -// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -// mod reduce_sum_square_i32_export_do_not_keepdims; -// mod reduce_sum_square_i32_export_keepdims; -// mod reduce_sum_square_i32_export_negative_axes_keepdims; -// mod reduce_sum_square_i8_export_do_not_keepdims; -// mod reduce_sum_square_i8_export_keepdims; -// mod reduce_sum_square_i8_export_negative_axes_keepdims; -// mod reduce_sum_square_u32_export_do_not_keepdims; -// mod reduce_sum_square_u32_export_keepdims; -// mod reduce_sum_square_u32_export_negative_axes_keepdims; -// mod reduce_l2_fp16x16_export_do_not_keepdims; -// mod reduce_l2_fp16x16_export_keepdims; -// mod reduce_l2_fp16x16_export_negative_axes_keepdims; -// mod reduce_l2_fp8x23_export_do_not_keepdims; -// mod reduce_l2_fp8x23_export_keepdims; -// mod reduce_l2_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_fp16x16_export_do_not_keepdims; -// mod reduce_l1_fp16x16_export_keepdims; -// mod reduce_l1_fp16x16_export_negative_axes_keepdims; -// mod reduce_l1_fp8x23_export_do_not_keepdims; -// mod reduce_l1_fp8x23_export_keepdims; -// mod reduce_l1_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_i32_export_do_not_keepdims; -// mod reduce_l1_i32_export_keepdims; -// mod reduce_l1_i32_export_negative_axes_keepdims; -// mod reduce_l1_i8_export_do_not_keepdims; -// mod reduce_l1_i8_export_keepdims; -// mod reduce_l1_i8_export_negative_axes_keepdims; -// mod reduce_l1_u32_export_do_not_keepdims; -// mod reduce_l1_u32_export_keepdims; -// mod 
reduce_l1_u32_export_negative_axes_keepdims; -// mod reduce_prod_fp16x16_1D; -// mod reduce_prod_fp16x16_2D_default; -// mod reduce_prod_fp16x16_2D_keepdims; -// mod reduce_prod_fp16x16_2D_axis_1; -// mod reduce_prod_fp8x23_1D; -// mod reduce_prod_fp8x23_2D_default; -// mod reduce_prod_fp8x23_2D_keepdims; -// mod reduce_prod_fp8x23_2D_axis_1; -// mod reduce_prod_i32_1D; -// mod reduce_prod_i32_2D_default; -// mod reduce_prod_i32_2D_keepdims; -// mod reduce_prod_i32_2D_axis_1; -// mod reduce_prod_i8_1D; -// mod reduce_prod_i8_2D_default; -// mod reduce_prod_i8_2D_keepdims; -// mod reduce_prod_i8_2D_axis_1; -// mod reduce_prod_u32_1D; -// mod reduce_prod_u32_2D_default; -// mod reduce_prod_u32_2D_keepdims; -// mod reduce_prod_u32_2D_axis_1; -// mod gather_elements_fp16x16_3d_default; -// mod gather_elements_fp16x16_3d_axis1; -// mod gather_elements_fp16x16_3d_axis2; -// mod gather_elements_fp8x23_3d_default; -// mod gather_elements_fp8x23_3d_axis1; -// mod gather_elements_fp8x23_3d_axis2; -// mod gather_elements_i8_3d_default; -// mod gather_elements_i8_3d_axis1; -// mod gather_elements_i32_3d_default; -// mod gather_elements_i32_3d_axis1; -// mod gather_elements_i32_3d_axis2; -// mod gather_elements_u32_default; -// mod gather_elements_u32_axis1; -// mod gather_elements_u32_axis2; -// mod gather_elements_u32_axis3; -// mod sequence_length_fp16x16; -// mod sequence_length_fp16x16_broadcast; -// mod sequence_length_fp8x23; -// mod sequence_length_fp8x23_broadcast; -// mod sequence_length_i32; -// mod sequence_length_i32_broadcast; -// mod sequence_length_i8; -// mod sequence_length_i8_broadcast; -// mod sequence_length_u32; -// mod sequence_length_u32_broadcast; -// mod sequence_at_u32_positive; -// mod sequence_at_u32_negative; -// mod sequence_at_fp16x16_positive; -// mod sequence_at_fp16x16_negative; -// mod sequence_at_fp8x23_positive; -// mod sequence_at_fp8x23_negative; -// mod sequence_at_i32_positive; -// mod sequence_at_i32_negative; -// mod 
sequence_at_i8_positive; -// mod sequence_at_i8_negative; -// mod reduce_min_fp16x16_1D; -// mod reduce_min_fp16x16_2D_default; -// mod reduce_min_fp16x16_2D_keepdims; -// mod reduce_min_fp16x16_2D_axis_1; -// mod reduce_min_fp8x23_1D; -// mod reduce_min_fp8x23_2D_default; -// mod reduce_min_fp8x23_2D_keepdims; -// mod reduce_min_fp8x23_2D_axis_1; -// mod reduce_min_i32_1D; -// mod reduce_min_i32_2D_default; -// mod reduce_min_i32_2D_keepdims; -// mod reduce_min_i32_2D_axis_1; -// mod reduce_min_i8_1D; -// mod reduce_min_i8_2D_default; -// mod reduce_min_i8_2D_keepdims; -// mod reduce_min_i8_2D_axis_1; -// mod reduce_min_u32_1D; -// mod reduce_min_u32_2D_default; -// mod reduce_min_u32_2D_keepdims; -// mod reduce_min_u32_2D_axis_1; -// mod sequence_construct_fp16x16; -// mod sequence_construct_fp8x23; -// mod sequence_construct_i32; -// mod sequence_construct_i8; -// mod sequence_construct_u32; -// mod shrink_hard_fp16x16; -// mod shrink_soft_fp16x16; -// mod shrink_hard_fp8x23; -// mod shrink_soft_fp8x23; -// mod sequence_empty_fp16x16; -// mod sequence_empty_fp8x23; -// mod sequence_empty_i32; -// mod sequence_empty_i8; -// mod sequence_empty_u32; -// mod reduce_mean_fp16x16_1D; -// mod reduce_mean_fp16x16_2D_default; -// mod reduce_mean_fp16x16_2D_keepdims; -// mod reduce_mean_fp16x16_2D_axis_1; -// mod reduce_mean_fp8x23_1D; -// mod reduce_mean_fp8x23_2D_default; -// mod reduce_mean_fp8x23_2D_keepdims; -// mod reduce_mean_fp8x23_2D_axis_1; -// mod reduce_mean_i32_1D; -// mod reduce_mean_i32_2D_default; -// mod reduce_mean_i32_2D_keepdims; -// mod reduce_mean_i32_2D_axis_1; -// mod reduce_mean_i8_1D; -// mod reduce_mean_i8_2D_default; -// mod reduce_mean_i8_2D_keepdims; -// mod reduce_mean_i8_2D_axis_1; -// mod reduce_mean_u32_1D; -// mod reduce_mean_u32_2D_default; -// mod reduce_mean_u32_2D_keepdims; -// mod reduce_mean_u32_2D_axis_1; -// mod pow_fp16x16; -// mod pow_fp16x16_broadcast; -// mod pow_fp8x23; -// mod pow_fp8x23_broadcast; -// mod 
sequence_erase_u32_positive; -// mod sequence_erase_u32_negative; -// mod sequence_erase_u32_empty; -// mod sequence_erase_fp16x16_positive; -// mod sequence_erase_fp16x16_negative; -// mod sequence_erase_fp16x16_empty; -// mod sequence_erase_fp8x23_positive; -// mod sequence_erase_fp8x23_negative; -// mod sequence_erase_fp8x23_empty; -// mod sequence_erase_i32_positive; -// mod sequence_erase_i32_negative; -// mod sequence_erase_i32_empty; -// mod sequence_erase_i8_positive; -// mod sequence_erase_i8_negative; -// mod sequence_erase_i8_empty; -// mod sequence_insert_fp16x16; -// mod sequence_insert_fp8x23; -// mod sequence_insert_i32; -// mod sequence_insert_i8; -// mod sequence_insert_u32; -// mod concat_from_sequence_fp8x23_new_axis_zero; -// mod concat_from_sequence_fp8x23_new_axis_one; -// mod concat_from_sequence_fp8x23_new_axis_default; -// mod concat_from_sequence_fp16x16_new_axis_zero; -// mod concat_from_sequence_fp16x16_new_axis_one; -// mod concat_from_sequence_fp16x16_new_axis_default; -// mod concat_from_sequence_i32_new_axis_zero; -// mod concat_from_sequence_i32_new_axis_one; -// mod concat_from_sequence_i32_new_axis_default; -// mod concat_from_sequence_i8_new_axis_zero; -// mod concat_from_sequence_i8_new_axis_one; -// mod concat_from_sequence_i8_new_axis_default; -// mod concat_from_sequence_u32_new_axis_zero; -// mod concat_from_sequence_u32_new_axis_one; -// mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_nan_fp8x23; -// mod is_inf_fp16x16; -// mod is_inf_fp8x23; -// mod is_inf_i32; -// mod is_inf_i8; -// mod is_inf_u32; -// mod is_pos_inf_fp16x16; -// mod is_neg_inf_fp16x16; -// mod is_pos_inf_fp8x23; -// mod is_neg_inf_fp8x23; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; -// mod is_pos_inf_i8; -// mod is_neg_inf_i8; -// mod reduce_log_sum_fp8x23_export_do_not_keepdims; -// mod reduce_log_sum_fp8x23_export_keepdims; -// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -// mod 
reduce_log_sum_fp16x16_export_do_not_keepdims; -// mod reduce_log_sum_fp16x16_export_keepdims; -// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -// mod and_bool; -// mod erf_fp16x16; -// mod erf_fp8x23; -// mod unique_fp16x16_without_axis_sorted; -// mod unique_fp16x16_with_axis_zero_sorted; -// mod unique_u32_without_axis_sorted; -// mod unique_u32_without_axis_not_sorted; -// mod unique_u32_with_axis_zero_sorted; -// mod unique_u32_with_axis_zero_not_sorted; -// mod unique_u32_with_axis_one_sorted; -// mod unique_u32_with_axis_one_not_sorted; -// mod gather_nd_fp16x16_3d_default; -// mod gather_nd_fp16x16_3d_batch_dims1; -// mod gather_nd_fp16x16_3d_batch_dims2; -// mod gather_nd_fp8x23_3d_default; -// mod gather_nd_fp8x23_3d_batch_dims1; -// mod gather_nd_fp8x23_3d_batch_dims2; -// mod gather_nd_i32_3d_default; -// mod gather_nd_i32_3d_batch_dims1; -// mod gather_nd_i32_3d_batch_dims2; -// mod gather_nd_i8_3d_default; -// mod gather_nd_i8_3d_batch_dims1; -// mod gather_nd_u32_default; -// mod gather_nd_u32_batch_dims1; -// mod gather_nd_u32_batch_dims2; -// mod resize_upsample_scales_nearest; -// mod resize_downsample_scales_cubic; -// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_downsample_scales_cubic_align_corners; -// mod resize_upsample_scales_linear; -// mod resize_downsample_scales_linear_align_corners; -// mod resize_downsample_scales_nearest; -// mod resize_upsample_scales_cubic; -// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_upsample_scales_cubic_align_corners; -// mod resize_upsample_scales_cubic_asymmetric; -// mod resize_upsample_scales_linear_align_corners; -// mod resize_upsample_sizes_nearest; -// mod resize_upsample_sizes_cubic; -// mod resize_downsample_sizes_cubic; -// mod resize_downsample_sizes_nearest; -// mod resize_upsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_scales_cubic_antialias; -// mod resize_downsample_scales_linear_antialias; -// mod 
resize_downsample_sizes_cubic_antialias; -// mod resize_downsample_sizes_linear_pytorch_half_pixel; -// mod resize_tf_crop_and_resize; -// mod resize_tf_crop_and_resize_extrapolation_value; -// mod resize_upsample_scales_nearest_axes_2_3; -// mod resize_upsample_scales_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_2_3; -// mod resize_upsample_sizes_nearest_ceil_half_pixel; -// mod resize_upsample_sizes_nearest_floor_align_corners; -// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -// mod resize_downsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_sizes_nearest_not_larger; -// mod resize_downsample_sizes_nearest_not_smaller; -// mod resize_tf_crop_and_resize_axes_2_3; -// mod resize_tf_crop_and_resize_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_not_larger; -// mod resize_upsample_sizes_nearest_not_smaller; -// mod compress_fp16x16_3d_default; -// mod compress_fp16x16_3d_axis1; -// mod compress_fp16x16_3d_axis2; -// mod compress_fp16x16_3d_axis3; -// mod compress_fp16x16_3d_noaxis; -// mod compress_fp8x23_3d_default; -// mod compress_fp8x23_3d_axis1; -// mod compress_fp8x23_3d_axis2; -// mod compress_i32_3d_default; -// mod compress_i32_3d_axis1; -// mod compress_i32_3d_axis2; -// mod compress_i8_3d_default; -// mod compress_i8_3d_axis1; -// mod compress_i8_3d_axis2; -// mod compress_u32_3d_default; -// mod compress_u32_3d_axis1; -// mod compress_u32_3d_axis2; -// mod compress_u32_3d_axis2_2; -// mod compress_u32_3d_axis3; -// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -// mod layer_normalization_default_axis; -// mod layer_normalization_4d_axis0; -// mod layer_normalization_4d_axis1; -// mod layer_normalization_4d_axis2; -// mod layer_normalization_4d_axis3; -// mod layer_normalization_3d_axis0_epsilon; -// mod 
layer_normalization_3d_axis_negative_3_epsilon; -// mod layer_normalization_3d_axis1_epsilon; -// mod layer_normalization_3d_axis2_epsilon; -// mod layer_normalization_4d_axis_negative_4; -// mod layer_normalization_4d_axis_negative_3; -// mod layer_normalization_4d_axis_negative_2; -// mod layer_normalization_4d_axis_negative_1; -// mod layer_normalization_3d_axis_negative_2_epsilon; -// mod layer_normalization_3d_axis_negative_1_epsilon; -// mod layer_normalization_test; -// mod split_u32_1d_equal_parts; -// mod split_u32_2d_equal_parts; -// mod split_u32_zero_size; -// mod split_u32_1d_variable_parts; -// mod split_u32_2d_variable_parts; -// mod split_u32_1d_uneven; -// mod split_u32_2d_uneven; -// mod split_fp16x16_1d_equal_parts; -// mod split_fp16x16_1d_variable_parts; -// mod split_fp16x16_2d_equal_parts; -// mod split_fp16x16_2d_variable_parts; -// mod split_fp16x16_zero_size; -// mod split_fp16x16_1d_uneven; -// mod split_fp16x16_2d_uneven; -// mod grid_sample; -// mod grid_sample_cubic; -// mod grid_sample_aligncorners; -// mod grid_sample_nearest; -// mod grid_sample_nearest_aligncorner; -// mod grid_sample_padding_border; -// mod grid_sample_padding_reflection; -// mod grid_sample_padding_zeros; -// mod col2im; -// mod col2im_5D; -// mod col2im_dilations; -// mod col2im_pads; -// mod col2im_strides; -// mod random_uniform_like_fp16x16; -// mod random_uniform_like_fp8x23; -// mod range_fp8x23; -// mod range_fp16x16; -// mod range_i32; -// mod range_i8; -// mod range_u32; -// mod hann_window_fp8x23; -// mod hann_window_fp16x16; -// mod hamming_window_fp16x16; -// mod hamming_window_fp8x23; -// mod blackman_window_fp16x16; -// mod blackman_window_fp8x23; -// mod split_to_sequence_fp16x16_1d_equal_parts; -// mod split_to_sequence_fp16x16_1d_variable_parts; -// mod split_to_sequence_fp16x16_2d_equal_parts; -// mod split_to_sequence_fp16x16_2d_variable_parts; -// mod split_to_sequence_fp16x16_zero_size; -// mod split_to_sequence_fp16x16_1d_uneven; -// mod 
split_to_sequence_fp16x16_2d_uneven; -// mod split_to_sequence_u32_1d_equal_parts; -// mod split_to_sequence_u32_1d_variable_parts; -// mod split_to_sequence_u32_2d_equal_parts; -// mod split_to_sequence_u32_2d_variable_parts; -// mod split_to_sequence_u32_zero_size; -// mod split_to_sequence_u32_1d_uneven; -// mod split_to_sequence_u32_2d_uneven; -// mod split_to_sequence_2d_scalar; -// mod split_to_sequence_2d_nokeepdims; -// mod split_to_sequence_1d_nokeepdims; -// mod reverse_sequence_fp16x16_batch_equal_parts; -// mod reverse_sequence_fp16x16_time_equal_parts; -// mod reverse_sequence_i32_batch_equal_parts; -// mod reverse_sequence_i32_time_equal_parts; -// mod reverse_sequence_i8_batch_equal_parts; -// mod reverse_sequence_i8_time_equal_parts; -// mod reverse_sequence_u32_4x4_batch; -// mod reverse_sequence_u32_4x4_time; -// mod reverse_sequence_u32_3x3_batch; -// mod reverse_sequence_u32_3x3_time; -// mod reverse_sequence_different_dimensions_4_5; -// mod reverse_sequence_different_dimensions_2_4; -// mod reverse_sequence_different_dimensions_1_6; -// mod reverse_sequence_different_dimensions_3x9_batch; -// mod reverse_sequence_different_dimensions_3x9_time; -// mod conv_transpose; -// mod conv_transpose_1d; -// mod conv_transpose_3d; -// mod conv_transpose_attributes; -// mod conv_transpose_autopad_same; -// mod conv_transpose_dilations; -// mod conv_transpose_pads; -// mod conv_transpose_group_2; -// mod conv_transpose_group_2_image_3; -// mod depth_to_space_fp16x16; -// mod depth_to_space_fp8x23; -// mod depth_to_space_i32; -// mod depth_to_space_i8; -// mod depth_to_space_u32; -// mod space_to_depth_fp16x16; -// mod space_to_depth_fp8x23; -// mod space_to_depth_i32; -// mod space_to_depth_i8; -// mod space_to_depth_u32; -// mod scatter_nd_fp16x16_3d_default; -// mod scatter_nd_fp16x16_3d_add; -// mod scatter_nd_fp16x16_3d_mul; -// mod scatter_nd_fp16x16_3d_max; -// mod scatter_nd_fp16x16_3d_min; -// mod scatter_nd_fp8x23_3d_default; -// mod 
scatter_nd_fp8x23_3d_add; -// mod scatter_nd_fp8x23_3d_mul; -// mod scatter_nd_fp8x23_3d_max; -// mod scatter_nd_fp8x23_3d_min; -// mod scatter_nd_u32_default; -// mod scatter_nd_u32_add; -// mod scatter_nd_u32_mul; -// mod scatter_nd_u32_max; -// mod scatter_nd_u32_min; -// mod conv_2D_with_padding; -// mod conv_1D_no_padding; -// mod conv_1D_with_padding; -// mod conv_3D_no_padding; -// mod conv_3D_with_padding; -// mod conv_4D_no_padding; -// mod conv_2D_with_2_groups; -// mod conv_2D_with_autopad_same; -// mod conv_2D_with_strides_asymmetric_padding; -// mod conv_2D_with_strides_with_padding; -// mod conv_4D_with_padding; -// mod label_encoder_fp16x16_3d_default; -// mod label_encoder_fp8x23_default; -// mod label_encoder_i8_default; -// mod label_encoder_i32_default; -// mod label_encoder_u32_default; -// mod reduce_sum_single_axis_fp16x16_1D; -// mod reduce_sum_single_axis_fp16x16_2D_default; -// mod reduce_sum_single_axis_fp16x16_2D_keepdims; -// mod reduce_sum_single_axis_fp16x16_2D_axis_1; -// mod reduce_sum_single_axis_fp8x23_1D; -// mod reduce_sum_single_axis_fp8x23_2D_default; -// mod reduce_sum_single_axis_fp8x23_2D_keepdims; -// mod reduce_sum_single_axis_fp8x23_2D_axis_1; -// mod reduce_sum_single_axis_i32_1D; -// mod reduce_sum_single_axis_i32_2D_default; -// mod reduce_sum_single_axis_i32_2D_keepdims; -// mod reduce_sum_single_axis_i32_2D_axis_1; -// mod reduce_sum_single_axis_i8_1D; -// mod reduce_sum_single_axis_i8_2D_default; -// mod reduce_sum_single_axis_i8_2D_keepdims; -// mod reduce_sum_single_axis_i8_2D_axis_1; -// mod reduce_sum_single_axis_u32_1D; -// mod reduce_sum_single_axis_u32_2D_default; -// mod reduce_sum_single_axis_u32_2D_keepdims; -// mod reduce_sum_single_axis_u32_2D_axis_1; +mod abs_fp16x16; +mod abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod 
add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmax_fp16x16_1D_default; +mod argmax_fp16x16_1D_keepdims_false; +mod argmax_fp16x16_1D_last_index; +mod argmax_fp16x16_2D_default; +mod argmax_fp16x16_2D_keepdims_false; +mod argmax_fp16x16_2D_last_index; +mod argmax_fp16x16_3D_default; +mod argmax_fp16x16_3D_keepdims_false; +mod argmax_fp16x16_3D_last_index; +mod argmax_fp8x23_1D_default; +mod argmax_fp8x23_1D_keepdims_false; +mod argmax_fp8x23_1D_last_index; +mod argmax_fp8x23_2D_default; +mod argmax_fp8x23_2D_keepdims_false; +mod argmax_fp8x23_2D_last_index; +mod argmax_fp8x23_3D_default; +mod argmax_fp8x23_3D_keepdims_false; +mod argmax_fp8x23_3D_last_index; +mod argmax_i32_1D_default; +mod argmax_i32_1D_keepdims_false; +mod argmax_i32_1D_last_index; +mod argmax_i32_2D_default; +mod argmax_i32_2D_keepdims_false; +mod argmax_i32_2D_last_index; +mod argmax_i32_3D_default; +mod argmax_i32_3D_keepdims_false; +mod argmax_i32_3D_last_index; +mod argmax_i8_1D_default; +mod argmax_i8_1D_keepdims_false; +mod argmax_i8_1D_last_index; +mod argmax_i8_2D_default; +mod argmax_i8_2D_keepdims_false; +mod argmax_i8_2D_last_index; +mod argmax_i8_3D_default; +mod argmax_i8_3D_keepdims_false; +mod argmax_i8_3D_last_index; +mod argmax_u32_1D_default; +mod argmax_u32_1D_keepdims_false; +mod argmax_u32_1D_last_index; +mod argmax_u32_2D_default; +mod argmax_u32_2D_keepdims_false; +mod argmax_u32_2D_last_index; +mod argmax_u32_3D_default; +mod argmax_u32_3D_keepdims_false; +mod argmax_u32_3D_last_index; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod 
argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; +mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod 
concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod 
less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softmax_fp16x16; +mod softmax_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod 
transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; +mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod gather_fp8x23_3d_default; +mod gather_fp8x23_3d_axis1; +mod gather_fp8x23_3d_axis2; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; +mod gather_i8_3d_default; +mod gather_i8_3d_axis1; +mod gather_i8_3d_axis2; +mod gather_i32_3d_default; +mod gather_i32_3d_axis1; +mod gather_i32_3d_axis2; +mod gather_u32_3d_default; +mod gather_u32_3d_axis1; +mod gather_u32_3d_axis2; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; 
+mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod 
max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; 
+mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod 
reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod gather_elements_fp16x16_3d_default; +mod gather_elements_fp16x16_3d_axis1; +mod gather_elements_fp16x16_3d_axis2; +mod gather_elements_fp8x23_3d_default; +mod gather_elements_fp8x23_3d_axis1; +mod gather_elements_fp8x23_3d_axis2; +mod gather_elements_i8_3d_default; +mod gather_elements_i8_3d_axis1; +mod gather_elements_i32_3d_default; +mod 
gather_elements_i32_3d_axis1; +mod gather_elements_i32_3d_axis2; +mod gather_elements_u32_default; +mod gather_elements_u32_axis1; +mod gather_elements_u32_axis2; +mod gather_elements_u32_axis3; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod 
reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod 
is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod 
resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +mod reduce_log_sum_exp_fp32x32_export_keepdims; +mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod 
layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod 
split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod 
scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; +mod label_encoder_fp16x16_3d_default; +mod label_encoder_fp8x23_default; +mod label_encoder_i8_default; +mod label_encoder_i32_default; +mod label_encoder_u32_default; +mod reduce_sum_single_axis_fp16x16_1D; +mod reduce_sum_single_axis_fp16x16_2D_default; +mod reduce_sum_single_axis_fp16x16_2D_keepdims; +mod reduce_sum_single_axis_fp16x16_2D_axis_1; +mod reduce_sum_single_axis_fp8x23_1D; +mod reduce_sum_single_axis_fp8x23_2D_default; +mod reduce_sum_single_axis_fp8x23_2D_keepdims; +mod reduce_sum_single_axis_fp8x23_2D_axis_1; +mod reduce_sum_single_axis_i32_1D; +mod reduce_sum_single_axis_i32_2D_default; +mod reduce_sum_single_axis_i32_2D_keepdims; +mod reduce_sum_single_axis_i32_2D_axis_1; +mod reduce_sum_single_axis_i8_1D; +mod reduce_sum_single_axis_i8_2D_default; +mod reduce_sum_single_axis_i8_2D_keepdims; +mod reduce_sum_single_axis_i8_2D_axis_1; +mod reduce_sum_single_axis_u32_1D; +mod reduce_sum_single_axis_u32_2D_default; +mod reduce_sum_single_axis_u32_2D_keepdims; +mod reduce_sum_single_axis_u32_2D_axis_1; mod reduce_sum_keep_dims; mod reduce_sum_no_keep_dims; mod reduce_sum_default_axes_keepdims; From c24f30ac330ef1b2fba0fa9b2c548d0db7f33cdc Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 09:27:35 +0100 Subject: [PATCH 18/68] Update test.yaml --- .github/workflows/test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index c1fc2cfd6..2cbaddbe3 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -9,5 +9,5 @@ jobs: - 
uses: actions/checkout@v3 - uses: software-mansion/setup-scarb@v1 with: - scarb-version: "2.5.3" + scarb-version: "2.6.4" - run: scarb test --workspace && scarb fmt --workspace \ No newline at end of file From 711db9097a5db326fc7cffbe8305cc3608172dba Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 09:28:08 +0100 Subject: [PATCH 19/68] bump version --- Scarb.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Scarb.toml b/Scarb.toml index f05fa6649..ebb3b26a7 100644 --- a/Scarb.toml +++ b/Scarb.toml @@ -1,7 +1,7 @@ [package] name = "orion" -version = "0.2.4" -cairo-version = "2.5.3" +version = "0.2.5" +cairo-version = "2.6.4" edition = "2023_10" description = "ONNX Runtime in Cairo for verifiable ML inference using STARK" homepage = "https://github.com/gizatechxyz/orion" From 5e4bad3601054886bc7f121e3928b132357df458 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 09:30:10 +0100 Subject: [PATCH 20/68] Update Scarb.toml --- Scarb.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Scarb.toml b/Scarb.toml index ebb3b26a7..27ead9f84 100644 --- a/Scarb.toml +++ b/Scarb.toml @@ -1,7 +1,7 @@ [package] name = "orion" version = "0.2.5" -cairo-version = "2.6.4" +cairo-version = "2.6.3" edition = "2023_10" description = "ONNX Runtime in Cairo for verifiable ML inference using STARK" homepage = "https://github.com/gizatechxyz/orion" From 2dcb46c385f554927ead3484b91d7001cdef1e27 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 11:30:50 +0100 Subject: [PATCH 21/68] refactor less operator --- src/operators/tensor/core.cairo | 6 +++--- src/operators/tensor/implementations/tensor_bool.cairo | 2 +- .../tensor/implementations/tensor_complex64.cairo | 2 +- .../tensor/implementations/tensor_fp16x16.cairo | 2 +- .../tensor/implementations/tensor_fp16x16wide.cairo | 2 +- .../tensor/implementations/tensor_fp32x32.cairo | 2 +- .../tensor/implementations/tensor_fp64x64.cairo | 2 +- 
.../tensor/implementations/tensor_fp8x23.cairo | 2 +- .../tensor/implementations/tensor_fp8x23wide.cairo | 2 +- src/operators/tensor/implementations/tensor_i32.cairo | 2 +- src/operators/tensor/implementations/tensor_i8.cairo | 2 +- src/operators/tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/less.cairo | 10 +++++----- 13 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index c2b9668b1..06791b024 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1344,7 +1344,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1354,10 +1354,10 @@ trait TensorTrait { /// // We can call `less` function as follows. /// return tensor_1.less(@tensor_2); /// } - /// >>> [0,0,0,0,0,0,0,1,1] + /// >>> [false,false,false,false,false,false,false,true,true] /// ``` /// - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less_equal /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 53b5e3060..4d742454e 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -121,7 +121,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 0cba48505..229a955a5 100644 --- 
a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -140,7 +140,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 405f665e1..64bef364c 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -134,7 +134,7 @@ impl FP16x16Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 01057cbf2..8d45db74f 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -144,7 +144,7 @@ impl FP16x16WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index cfdb06536..58f860650 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -132,7 +132,7 @@ impl FP32x32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git 
a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index a33e549b5..366a07e84 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -131,7 +131,7 @@ impl FP64x64Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index d1fcc5477..f79c1b2b6 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -133,7 +133,7 @@ impl FP8x23Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index c061dff21..849c7b4ca 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -136,7 +136,7 @@ impl FP8x23WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index e04b4e8b5..fe4d0dcfb 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -129,7 +129,7 @@ impl I32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn 
less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index e9f076d9d..3e59a601a 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -127,7 +127,7 @@ impl I8Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index b8264345a..23c49a40f 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -126,7 +126,7 @@ impl U32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo index 35f9b4d73..bd28f64ac 100644 --- a/src/operators/tensor/math/less.cairo +++ b/src/operators/tensor/math/less.cairo @@ -6,15 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less docstring fn less< T, - impl UsizeFTensor: TensorTrait, + impl BoolTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -26,9 +26,9 @@ fn less< let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); if *(*y.data)[indices_self] < 
*(*z.data)[indices_other] { - result.append(1); + result.append(true); } else { - result.append(0); + result.append(false); } n += 1; From 4ae5a3831356030e7c6d3c41afa486ad703b8a14 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 11:49:07 +0100 Subject: [PATCH 22/68] test and refactor equal --- .../operators/tensor/tensor.equal.md | 8 +-- .../framework/operators/tensor/tensor.less.md | 4 +- nodegen/node/equal.py | 20 +++---- nodegen/node/less.py | 20 +++---- src/operators/tensor/core.cairo | 10 ++-- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/equal.cairo | 10 ++-- tests/nodes/equal_fp16x16.cairo | 16 ++--- tests/nodes/equal_fp16x16/input_0.cairo | 24 ++++---- tests/nodes/equal_fp16x16/input_1.cairo | 32 +++++----- tests/nodes/equal_fp16x16/output_0.cairo | 58 +++++++++---------- tests/nodes/equal_fp16x16_broadcast.cairo | 16 ++--- .../equal_fp16x16_broadcast/input_0.cairo | 8 +-- .../equal_fp16x16_broadcast/input_1.cairo | 6 +- .../equal_fp16x16_broadcast/output_0.cairo | 12 ++-- tests/nodes/equal_fp8x23.cairo | 14 ++--- tests/nodes/equal_fp8x23/input_0.cairo | 30 +++++----- tests/nodes/equal_fp8x23/input_1.cairo | 28 ++++----- tests/nodes/equal_fp8x23/output_0.cairo | 58 +++++++++---------- tests/nodes/equal_fp8x23_broadcast.cairo | 14 ++--- .../equal_fp8x23_broadcast/input_0.cairo | 6 +- .../equal_fp8x23_broadcast/input_1.cairo | 6 +- .../equal_fp8x23_broadcast/output_0.cairo | 12 ++-- tests/nodes/equal_i32.cairo | 16 ++--- 
tests/nodes/equal_i32/input_0.cairo | 31 +++++----- tests/nodes/equal_i32/input_1.cairo | 31 +++++----- tests/nodes/equal_i32/output_0.cairo | 58 +++++++++---------- tests/nodes/equal_i32_broadcast.cairo | 16 ++--- tests/nodes/equal_i32_broadcast/input_0.cairo | 7 ++- tests/nodes/equal_i32_broadcast/input_1.cairo | 5 +- .../nodes/equal_i32_broadcast/output_0.cairo | 12 ++-- tests/nodes/equal_i8.cairo | 16 ++--- tests/nodes/equal_i8/input_0.cairo | 31 +++++----- tests/nodes/equal_i8/input_1.cairo | 35 +++++------ tests/nodes/equal_i8/output_0.cairo | 58 +++++++++---------- tests/nodes/equal_i8_broadcast.cairo | 16 ++--- tests/nodes/equal_i8_broadcast/input_0.cairo | 7 ++- tests/nodes/equal_i8_broadcast/input_1.cairo | 7 ++- tests/nodes/equal_i8_broadcast/output_0.cairo | 12 ++-- tests/nodes/equal_u32.cairo | 14 +++-- tests/nodes/equal_u32/input_0.cairo | 29 +++++----- tests/nodes/equal_u32/input_1.cairo | 29 +++++----- tests/nodes/equal_u32/output_0.cairo | 58 +++++++++---------- tests/nodes/equal_u32_broadcast.cairo | 14 +++-- tests/nodes/equal_u32_broadcast/input_0.cairo | 9 +-- tests/nodes/equal_u32_broadcast/input_1.cairo | 7 ++- .../nodes/equal_u32_broadcast/output_0.cairo | 12 ++-- tests/nodes/less_fp16x16.cairo | 14 ++--- tests/nodes/less_fp16x16/input_0.cairo | 26 ++++----- tests/nodes/less_fp16x16/input_1.cairo | 32 +++++----- tests/nodes/less_fp16x16/output_0.cairo | 58 +++++++++---------- tests/nodes/less_fp16x16_broadcast.cairo | 14 ++--- .../less_fp16x16_broadcast/input_0.cairo | 28 ++++----- .../less_fp16x16_broadcast/input_1.cairo | 6 +- .../less_fp16x16_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_fp8x23.cairo | 12 ++-- tests/nodes/less_fp8x23/input_0.cairo | 28 ++++----- tests/nodes/less_fp8x23/input_1.cairo | 26 ++++----- tests/nodes/less_fp8x23/output_0.cairo | 58 +++++++++---------- tests/nodes/less_fp8x23_broadcast.cairo | 12 ++-- .../nodes/less_fp8x23_broadcast/input_0.cairo | 32 +++++----- 
.../nodes/less_fp8x23_broadcast/input_1.cairo | 6 +- .../less_fp8x23_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i32.cairo | 14 ++--- tests/nodes/less_i32/input_0.cairo | 27 ++++----- tests/nodes/less_i32/input_1.cairo | 29 +++++----- tests/nodes/less_i32/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i32_broadcast.cairo | 14 ++--- tests/nodes/less_i32_broadcast/input_0.cairo | 31 +++++----- tests/nodes/less_i32_broadcast/input_1.cairo | 9 +-- tests/nodes/less_i32_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i8.cairo | 14 ++--- tests/nodes/less_i8/input_0.cairo | 31 +++++----- tests/nodes/less_i8/input_1.cairo | 27 ++++----- tests/nodes/less_i8/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i8_broadcast.cairo | 14 ++--- tests/nodes/less_i8_broadcast/input_0.cairo | 27 ++++----- tests/nodes/less_i8_broadcast/input_1.cairo | 7 ++- tests/nodes/less_i8_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_u32.cairo | 12 ++-- tests/nodes/less_u32/input_0.cairo | 35 +++++------ tests/nodes/less_u32/input_1.cairo | 33 ++++++----- tests/nodes/less_u32/output_0.cairo | 58 +++++++++---------- tests/nodes/less_u32_broadcast.cairo | 12 ++-- tests/nodes/less_u32_broadcast/input_0.cairo | 31 +++++----- tests/nodes/less_u32_broadcast/input_1.cairo | 7 ++- tests/nodes/less_u32_broadcast/output_0.cairo | 58 +++++++++---------- 97 files changed, 1093 insertions(+), 1061 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.equal.md b/docs/framework/operators/tensor/tensor.equal.md index 6e393c989..e8e91c20f 100644 --- a/docs/framework/operators/tensor/tensor.equal.md +++ b/docs/framework/operators/tensor/tensor.equal.md @@ -1,7 +1,7 @@ #tensor.equal ```rust - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if two tensors are equal element-wise. 
@@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. ## Examples @@ -43,7 +43,7 @@ fn eq_example() -> Tensor { // We can call `equal` function as follows. return tensor_1.equal(@tensor_2); } ->>> [1,1,1,1,1,0,0,0] +>>> [true,true,true,true,true,false,false,false] ``` Case 2: Compare tensors with different shapes @@ -63,5 +63,5 @@ fn eq_example() -> Tensor { // We can call `equal` function as follows. return tensor_1.equal(@tensor_2); } ->>> [1,1,1,0,0,0,0,0,0] +>>> [true,true,true,false,false,false,false,false,false] ``` diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md index d5d264d8a..797e51f89 100644 --- a/docs/framework/operators/tensor/tensor.less.md +++ b/docs/framework/operators/tensor/tensor.less.md @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -63,5 +63,5 @@ fn less_example() -> Tensor { // We can call `less` function as follows. 
return tensor_1.less(@tensor_2); } ->>> [0,0,0,0,0,0,0,1,1] +>>> [false,false,false,false,false,false,false,true,true] ``` diff --git a/nodegen/node/equal.py b/nodegen/node/equal.py index f995ae999..9474d3eca 100644 --- a/nodegen/node/equal.py +++ b/nodegen/node/equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_u32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_u32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_i32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_i32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_i8" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_i8_broadcast" make_test([x, y], z, 
"input_0.equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_fp8x23" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_fp8x23_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_fp16x16" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "equal_fp16x16_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) diff --git a/nodegen/node/less.py b/nodegen/node/less.py index 20b39263d..14af93201 100644 --- a/nodegen/node/less.py +++ b/nodegen/node/less.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_u32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_u32_broadcast" make_test([x, y], z, 
"input_0.less(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i8" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i8_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp8x23" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp8x23_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, 
z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp16x16" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp16x16_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 06791b024..e3dbc6e06 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1085,7 +1085,7 @@ trait TensorTrait { /// #tensor.equal /// /// ```rust - /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if two tensors are equal element-wise. @@ -1104,7 +1104,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. /// /// ## Examples /// @@ -1127,7 +1127,7 @@ trait TensorTrait { /// // We can call `equal` function as follows. /// return tensor_1.equal(@tensor_2); /// } - /// >>> [1,1,1,1,1,0,0,0] + /// >>> [true,true,true,true,true,false,false,false] /// ``` /// /// Case 2: Compare tensors with different shapes @@ -1147,10 +1147,10 @@ trait TensorTrait { /// // We can call `equal` function as follows. 
/// return tensor_1.equal(@tensor_2); /// } - /// >>> [1,1,1,0,0,0,0,0,0] + /// >>> [true,true,true,false,false,false,false,false,false] /// ``` /// - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.greater /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 4d742454e..110cbb20e 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -109,7 +109,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 229a955a5..6870de2e2 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -128,7 +128,7 @@ impl Complex64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 64bef364c..6a22c9a6b 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -122,7 +122,7 @@ impl FP16x16Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 
8d45db74f..ddca96466 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -132,7 +132,7 @@ impl FP16x16WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 58f860650..72d629224 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -120,7 +120,7 @@ impl FP32x32Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 366a07e84..8d0b90f5e 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -119,7 +119,7 @@ impl FP64x64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index f79c1b2b6..c319bb466 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -121,7 +121,7 @@ impl FP8x23Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git 
a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 849c7b4ca..c1b805677 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -124,7 +124,7 @@ impl FP8x23WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index fe4d0dcfb..42b7bb4db 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -117,7 +117,7 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 3e59a601a..53f6424b0 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -115,7 +115,7 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 23c49a40f..f9af20b65 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -114,7 +114,7 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { 
math::equal::equal(self, other) } diff --git a/src/operators/tensor/math/equal.cairo b/src/operators/tensor/math/equal.cairo index d2693acf9..7b2a1cb6c 100644 --- a/src/operators/tensor/math/equal.cairo +++ b/src/operators/tensor/math/equal.cairo @@ -6,15 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::equal docstring fn equal< T, - impl UsizeFTensor: TensorTrait, + impl BoolTensor: TensorTrait, impl TPartialEq: PartialEq, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -26,9 +26,9 @@ fn equal< let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); if *(*y.data)[indices_self] == *(*z.data)[indices_other] { - result.append(1); + result.append(true); } else { - result.append(0); + result.append(false); } n += 1; diff --git a/tests/nodes/equal_fp16x16.cairo b/tests/nodes/equal_fp16x16.cairo index 38c3753cd..a71efdeda 100644 --- a/tests/nodes/equal_fp16x16.cairo +++ b/tests/nodes/equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = 
input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_fp16x16/input_0.cairo b/tests/nodes/equal_fp16x16/input_0.cairo index 1c0bdb213..75bbd1b1a 100644 --- a/tests/nodes/equal_fp16x16/input_0.cairo +++ b/tests/nodes/equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -13,29 +13,29 @@ fn input_0() -> Tensor { data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + 
data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/input_1.cairo b/tests/nodes/equal_fp16x16/input_1.cairo index c6e8fe0f6..76be7abd4 100644 --- a/tests/nodes/equal_fp16x16/input_1.cairo +++ b/tests/nodes/equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - 
data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/output_0.cairo b/tests/nodes/equal_fp16x16/output_0.cairo index 2078b6e18..5806c9c8f 100644 --- a/tests/nodes/equal_fp16x16/output_0.cairo +++ b/tests/nodes/equal_fp16x16/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; 
-fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast.cairo b/tests/nodes/equal_fp16x16_broadcast.cairo index 74eb5217e..1ca98f2ab 100644 --- a/tests/nodes/equal_fp16x16_broadcast.cairo +++ b/tests/nodes/equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, 
U32TensorDiv}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo index a378b6d18..6e630308c 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo index 9a7b2b64d..b3280a1c0 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo index d2fab9fd0..e42f5e1e1 100644 --- a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo @@ -1,16 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23.cairo b/tests/nodes/equal_fp8x23.cairo index 112c71e8b..6a0d2aac1 100644 --- a/tests/nodes/equal_fp8x23.cairo +++ b/tests/nodes/equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::BoolTensorPartialEq; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_fp8x23/input_0.cairo b/tests/nodes/equal_fp8x23/input_0.cairo index 4f138ecb5..b13b78476 100644 --- a/tests/nodes/equal_fp8x23/input_0.cairo +++ b/tests/nodes/equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 
25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/input_1.cairo b/tests/nodes/equal_fp8x23/input_1.cairo index b0010c344..4178c68b4 100644 --- a/tests/nodes/equal_fp8x23/input_1.cairo +++ b/tests/nodes/equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -11,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, 
sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/output_0.cairo b/tests/nodes/equal_fp8x23/output_0.cairo index df58147cb..b38d8f0d7 100644 --- a/tests/nodes/equal_fp8x23/output_0.cairo +++ b/tests/nodes/equal_fp8x23/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast.cairo b/tests/nodes/equal_fp8x23_broadcast.cairo index 590193505..d40ab0cbc 100644 --- a/tests/nodes/equal_fp8x23_broadcast.cairo +++ 
b/tests/nodes/equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo index 31927729d..aa6318ba4 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } 
diff --git a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo index 6ffcceb3b..8c280300a 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo index 417a71252..e42f5e1e1 100644 --- a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo @@ -1,16 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32.cairo b/tests/nodes/equal_i32.cairo index c2612bc6a..e9678d8b0 100644 --- a/tests/nodes/equal_i32.cairo +++ b/tests/nodes/equal_i32.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_i32/input_0.cairo b/tests/nodes/equal_i32/input_0.cairo index 0e2586cdb..1ddc0739f 100644 --- a/tests/nodes/equal_i32/input_0.cairo +++ b/tests/nodes/equal_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(2); - data.append(2); - data.append(-3); - data.append(1); - data.append(-3); data.append(0); - data.append(-1); data.append(-3); data.append(-3); - data.append(-1); - data.append(-1); - data.append(0); data.append(1); - data.append(-1); - data.append(0); + data.append(-2); + data.append(2); + data.append(-2); data.append(0); data.append(-3); - data.append(1); data.append(-3); data.append(0); - data.append(1); 
data.append(-3); + data.append(-2); + data.append(2); + data.append(-2); + data.append(-2); + data.append(-2); data.append(-1); + data.append(0); data.append(2); + data.append(-1); data.append(-2); data.append(-3); + data.append(2); + data.append(0); + data.append(-3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/input_1.cairo b/tests/nodes/equal_i32/input_1.cairo index c6b5a6fbe..1721bc693 100644 --- a/tests/nodes/equal_i32/input_1.cairo +++ b/tests/nodes/equal_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-3); - data.append(-3); - data.append(-2); data.append(-3); + data.append(-1); + data.append(1); + data.append(0); + data.append(0); data.append(2); + data.append(-2); + data.append(-2); + data.append(-1); data.append(0); - data.append(1); data.append(-1); data.append(-3); - data.append(-1); data.append(2); + data.append(-2); data.append(-1); - data.append(1); data.append(-3); - data.append(-1); data.append(2); - data.append(2); - data.append(-2); + data.append(1); + data.append(1); data.append(-1); - data.append(-2); - data.append(-3); + data.append(2); + data.append(1); data.append(-1); data.append(-1); - data.append(0); - data.append(0); + data.append(-2); + data.append(2); data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/output_0.cairo b/tests/nodes/equal_i32/output_0.cairo index 0cdc2c69d..1e3bb5a60 100644 --- a/tests/nodes/equal_i32/output_0.cairo +++ b/tests/nodes/equal_i32/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast.cairo b/tests/nodes/equal_i32_broadcast.cairo index 012a7e165..8acfb1db1 100644 --- a/tests/nodes/equal_i32_broadcast.cairo +++ b/tests/nodes/equal_i32_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_i32_broadcast/input_0.cairo b/tests/nodes/equal_i32_broadcast/input_0.cairo index cefd51703..ad3cd4116 100644 --- a/tests/nodes/equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/equal_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(-2); - data.append(0); + data.append(-3); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/input_1.cairo b/tests/nodes/equal_i32_broadcast/input_1.cairo index fed8199ca..8b33cf367 100644 --- a/tests/nodes/equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/equal_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn 
input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/output_0.cairo b/tests/nodes/equal_i32_broadcast/output_0.cairo index 9a2391c78..75e094812 100644 --- a/tests/nodes/equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/equal_i32_broadcast/output_0.cairo @@ -1,16 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(1); + data.append(true); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8.cairo b/tests/nodes/equal_i8.cairo index e19689a8d..e5c0184fc 100644 --- a/tests/nodes/equal_i8.cairo +++ b/tests/nodes/equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::BoolTensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_i8() { let input_0 = input_0::input_0(); let input_1 = 
input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_i8/input_0.cairo b/tests/nodes/equal_i8/input_0.cairo index 09ea9171a..c89afb29b 100644 --- a/tests/nodes/equal_i8/input_0.cairo +++ b/tests/nodes/equal_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); data.append(0); - data.append(-3); - data.append(-3); - data.append(1); - data.append(-3); data.append(-1); - data.append(1); data.append(-3); - data.append(2); data.append(0); + data.append(0); + data.append(-2); + data.append(2); data.append(-1); + data.append(0); + data.append(-2); + data.append(1); + data.append(-2); data.append(1); data.append(-3); + data.append(2); data.append(-3); data.append(-3); - data.append(1); - data.append(2); data.append(2); data.append(-3); - data.append(0); - data.append(0); - data.append(-1); - data.append(-2); data.append(-3); + data.append(-3); + data.append(-3); + data.append(2); + data.append(-2); + data.append(-1); + data.append(2); data.append(-1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/input_1.cairo b/tests/nodes/equal_i8/input_1.cairo index b1ab51213..c27187429 100644 --- a/tests/nodes/equal_i8/input_1.cairo +++ b/tests/nodes/equal_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use 
orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(-1); - data.append(2); data.append(-3); - data.append(1); + data.append(0); data.append(-3); data.append(-2); - data.append(1); - data.append(0); - data.append(-1); - data.append(-1); - data.append(-1); - data.append(0); + data.append(-3); + data.append(2); + data.append(2); + data.append(-2); + data.append(2); + data.append(-2); data.append(-1); data.append(1); - data.append(1); + data.append(2); data.append(-1); - data.append(-2); data.append(-1); - data.append(2); - data.append(-3); data.append(1); + data.append(0); + data.append(2); + data.append(-2); data.append(-2); - data.append(1); data.append(-3); - data.append(1); data.append(-2); + data.append(2); + data.append(-1); + data.append(-2); + data.append(0); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/output_0.cairo b/tests/nodes/equal_i8/output_0.cairo index 452bd9d81..798787e19 100644 --- a/tests/nodes/equal_i8/output_0.cairo +++ b/tests/nodes/equal_i8/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); 
- data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast.cairo b/tests/nodes/equal_i8_broadcast.cairo index cc1fd18de..2713598b4 100644 --- a/tests/nodes/equal_i8_broadcast.cairo +++ b/tests/nodes/equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::BoolTensor; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/equal_i8_broadcast/input_0.cairo b/tests/nodes/equal_i8_broadcast/input_0.cairo index 428f70adf..3f1c3803b 100644 --- a/tests/nodes/equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/equal_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,8 +9,8 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(2); + data.append(1); + data.append(1); data.append(-1); data.append(2); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/equal_i8_broadcast/input_1.cairo b/tests/nodes/equal_i8_broadcast/input_1.cairo index 51830d474..e09cd290a 100644 --- a/tests/nodes/equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/equal_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-1); + data.append(0); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/output_0.cairo b/tests/nodes/equal_i8_broadcast/output_0.cairo index d2fab9fd0..44d7202af 100644 --- a/tests/nodes/equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/equal_i8_broadcast/output_0.cairo @@ -1,16 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use 
orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32.cairo b/tests/nodes/equal_u32.cairo index 6591ecbc6..ef7a18f53 100644 --- a/tests/nodes/equal_u32.cairo +++ b/tests/nodes/equal_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_u32/input_0.cairo b/tests/nodes/equal_u32/input_0.cairo index 9c3b88326..54f581c79 100644 --- a/tests/nodes/equal_u32/input_0.cairo +++ b/tests/nodes/equal_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() 
-> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(5); - data.append(0); - data.append(2); data.append(5); + data.append(1); data.append(3); - data.append(2); data.append(5); + data.append(3); + data.append(2); + data.append(1); data.append(5); data.append(0); + data.append(4); + data.append(5); data.append(5); - data.append(0); - data.append(0); data.append(2); data.append(5); - data.append(4); - data.append(1); data.append(3); + data.append(4); data.append(1); - data.append(2); data.append(0); - data.append(5); data.append(2); - data.append(4); - data.append(2); - data.append(1); + data.append(5); + data.append(3); + data.append(3); + data.append(5); + data.append(5); + data.append(5); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/input_1.cairo b/tests/nodes/equal_u32/input_1.cairo index 39331ddba..4545dcb02 100644 --- a/tests/nodes/equal_u32/input_1.cairo +++ b/tests/nodes/equal_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(4); - data.append(2); data.append(3); + data.append(5); + data.append(2); data.append(4); data.append(1); + data.append(5); data.append(2); data.append(0); - data.append(0); - data.append(4); - data.append(0); data.append(2); - data.append(0); + data.append(1); + data.append(4); + data.append(3); data.append(0); data.append(5); - data.append(3); + data.append(1); data.append(5); - data.append(2); - data.append(0); - data.append(2); - data.append(3); data.append(5); - data.append(4); + data.append(3); + data.append(0); data.append(1); - data.append(2); + data.append(3); + 
data.append(0); + data.append(3); data.append(5); data.append(3); - data.append(4); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/output_0.cairo b/tests/nodes/equal_u32/output_0.cairo index 339b2c840..573b6cac7 100644 --- a/tests/nodes/equal_u32/output_0.cairo +++ b/tests/nodes/equal_u32/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast.cairo b/tests/nodes/equal_u32_broadcast.cairo index 30a7868a5..44d663e38 100644 --- 
a/tests/nodes/equal_u32_broadcast.cairo +++ b/tests/nodes/equal_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.equal(@input_1); + let y_0 = input_0.equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/equal_u32_broadcast/input_0.cairo b/tests/nodes/equal_u32_broadcast/input_0.cairo index d47d6b523..bb26dfe29 100644 --- a/tests/nodes/equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/equal_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(5); - data.append(5); - data.append(2); + data.append(3); data.append(0); + data.append(3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/input_1.cairo b/tests/nodes/equal_u32_broadcast/input_1.cairo index 7c4c61dff..39ab80b0c 100644 --- a/tests/nodes/equal_u32_broadcast/input_1.cairo +++ 
b/tests/nodes/equal_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(4); - data.append(4); + data.append(3); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/output_0.cairo b/tests/nodes/equal_u32_broadcast/output_0.cairo index 417a71252..f757d7d01 100644 --- a/tests/nodes/equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/equal_u32_broadcast/output_0.cairo @@ -1,16 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(true); + data.append(true); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16.cairo b/tests/nodes/less_fp16x16.cairo index 04ac88b63..0f464463a 100644 --- a/tests/nodes/less_fp16x16.cairo +++ b/tests/nodes/less_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::operators::tensor::FP16x16TensorPartialEq; -use 
orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp16x16/input_0.cairo b/tests/nodes/less_fp16x16/input_0.cairo index 41fa7524d..12c4be2fd 100644 --- a/tests/nodes/less_fp16x16/input_0.cairo +++ b/tests/nodes/less_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); 
data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/input_1.cairo b/tests/nodes/less_fp16x16/input_1.cairo index fe0e56e41..8c6b3809a 100644 --- a/tests/nodes/less_fp16x16/input_1.cairo +++ b/tests/nodes/less_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - 
data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/output_0.cairo b/tests/nodes/less_fp16x16/output_0.cairo index ff7a8e63d..19b525d8f 100644 --- a/tests/nodes/less_fp16x16/output_0.cairo +++ b/tests/nodes/less_fp16x16/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast.cairo b/tests/nodes/less_fp16x16_broadcast.cairo index 787c07448..750e149d7 100644 --- a/tests/nodes/less_fp16x16_broadcast.cairo +++ b/tests/nodes/less_fp16x16_broadcast.cairo @@ -3,22 
+3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_fp16x16_broadcast/input_0.cairo index 18782c0dd..cab41faaa 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 
131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_fp16x16_broadcast/input_1.cairo index 743355c3d..65ffb99b7 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; 
use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_fp16x16_broadcast/output_0.cairo index 7d4613a88..b638d7ea1 100644 --- a/tests/nodes/less_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(true); 
+ data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23.cairo b/tests/nodes/less_fp8x23.cairo index 6fe7b08b8..608b0e1e0 100644 --- a/tests/nodes/less_fp8x23.cairo +++ b/tests/nodes/less_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp8x23/input_0.cairo b/tests/nodes/less_fp8x23/input_0.cairo index fbcd9f2a8..aee69b9c7 100644 --- a/tests/nodes/less_fp8x23/input_0.cairo +++ b/tests/nodes/less_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ 
-10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true 
}); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/input_1.cairo b/tests/nodes/less_fp8x23/input_1.cairo index e27ba84da..a013faaea 100644 --- a/tests/nodes/less_fp8x23/input_1.cairo +++ b/tests/nodes/less_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 
{ mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/output_0.cairo b/tests/nodes/less_fp8x23/output_0.cairo index 33906ca90..180bf7d2e 100644 --- a/tests/nodes/less_fp8x23/output_0.cairo +++ b/tests/nodes/less_fp8x23/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); + data.append(false); + data.append(false); + 
data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast.cairo b/tests/nodes/less_fp8x23_broadcast.cairo index e8b3155c5..19bdf417c 100644 --- a/tests/nodes/less_fp8x23_broadcast.cairo +++ b/tests/nodes/less_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_fp8x23_broadcast/input_0.cairo index 88ad7277b..425ca9e2e 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 
8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_fp8x23_broadcast/input_1.cairo index 29b68e7c3..d7e956748 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_fp8x23_broadcast/output_0.cairo index fbf242193..05547a4b4 100644 --- a/tests/nodes/less_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use 
orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32.cairo b/tests/nodes/less_i32.cairo index 4a251b995..9c5cf2bbf 100644 --- a/tests/nodes/less_i32.cairo +++ b/tests/nodes/less_i32.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use 
orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i32/input_0.cairo b/tests/nodes/less_i32/input_0.cairo index ab59d73f2..d89055483 100644 --- a/tests/nodes/less_i32/input_0.cairo +++ b/tests/nodes/less_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); + data.append(1); + data.append(0); data.append(-2); data.append(2); - data.append(2); - data.append(2); - data.append(-2); + data.append(1); data.append(1); data.append(1); data.append(1); data.append(0); - data.append(-2); - data.append(-3); data.append(-1); data.append(-1); - data.append(-2); - data.append(2); - data.append(-2); - data.append(-1); - data.append(-3); data.append(0); data.append(0); data.append(-3); + data.append(1); + data.append(-1); + data.append(-3); data.append(-3); + data.append(2); + data.append(-1); + data.append(0); data.append(-2); data.append(2); + data.append(-1); + data.append(1); data.append(-3); - data.append(0); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/input_1.cairo b/tests/nodes/less_i32/input_1.cairo index cadacc785..a33bfe41b 100644 --- a/tests/nodes/less_i32/input_1.cairo +++ b/tests/nodes/less_i32/input_1.cairo @@ 
-1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(2); data.append(0); - data.append(-1); - data.append(0); - data.append(-2); + data.append(1); data.append(2); data.append(1); data.append(-1); + data.append(-2); + data.append(0); data.append(-3); - data.append(-3); + data.append(0); + data.append(1); + data.append(2); data.append(-2); data.append(-2); data.append(2); - data.append(-1); - data.append(-3); data.append(2); + data.append(2); + data.append(0); data.append(1); + data.append(-3); + data.append(-3); + data.append(-2); data.append(-2); - data.append(-1); data.append(-2); - data.append(1); data.append(-3); - data.append(-1); - data.append(0); - data.append(1); - data.append(0); + data.append(-3); data.append(2); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/output_0.cairo b/tests/nodes/less_i32/output_0.cairo index bff093e8b..935f05c3b 100644 --- a/tests/nodes/less_i32/output_0.cairo +++ b/tests/nodes/less_i32/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - 
data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast.cairo b/tests/nodes/less_i32_broadcast.cairo index 552150976..f82c428a8 100644 --- a/tests/nodes/less_i32_broadcast.cairo +++ b/tests/nodes/less_i32_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - 
assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i32_broadcast/input_0.cairo b/tests/nodes/less_i32_broadcast/input_0.cairo index e49eaf809..bc2944af3 100644 --- a/tests/nodes/less_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); + data.append(2); + data.append(1); data.append(-2); + data.append(2); data.append(1); + data.append(0); data.append(-3); - data.append(2); + data.append(-1); data.append(-1); data.append(-3); data.append(0); - data.append(2); - data.append(1); + data.append(-3); + data.append(-3); + data.append(-1); data.append(-1); - data.append(0); - data.append(1); data.append(1); + data.append(2); data.append(0); + data.append(-2); + data.append(-2); data.append(1); data.append(-3); - data.append(2); data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(-3); - data.append(0); - data.append(2); + data.append(-2); + data.append(-2); + data.append(-1); data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/input_1.cairo b/tests/nodes/less_i32_broadcast/input_1.cairo index 99c50d44f..4f6d1b719 100644 --- a/tests/nodes/less_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> 
Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(0); - data.append(1); + data.append(-2); + data.append(2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/output_0.cairo b/tests/nodes/less_i32_broadcast/output_0.cairo index 79967537c..3ffcf6eac 100644 --- a/tests/nodes/less_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_i32_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8.cairo b/tests/nodes/less_i8.cairo index 085a6da35..00d6dddd4 100644 --- a/tests/nodes/less_i8.cairo +++ b/tests/nodes/less_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I8TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i8/input_0.cairo b/tests/nodes/less_i8/input_0.cairo index 28dd5a905..b2c30bed7 100644 --- a/tests/nodes/less_i8/input_0.cairo +++ b/tests/nodes/less_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(-3); + data.append(1); + data.append(1); data.append(-3); data.append(0); data.append(-3); + data.append(0); + data.append(2); data.append(-2); - data.append(-1); data.append(2); - data.append(-1); - data.append(-3); - data.append(-1); - 
data.append(-3); - data.append(-3); - data.append(-1); - data.append(-1); data.append(-2); + data.append(0); data.append(-2); - data.append(-3); data.append(-1); - data.append(1); data.append(-2); - data.append(0); + data.append(-3); + data.append(-1); + data.append(-1); data.append(-3); data.append(2); + data.append(2); + data.append(1); data.append(-3); - data.append(-2); + data.append(1); data.append(2); - data.append(0); - data.append(0); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/input_1.cairo b/tests/nodes/less_i8/input_1.cairo index 9dc4e7a6c..cde86d6da 100644 --- a/tests/nodes/less_i8/input_1.cairo +++ b/tests/nodes/less_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(2); data.append(-3); - data.append(1); - data.append(-2); data.append(-3); - data.append(2); + data.append(-2); + data.append(1); data.append(-1); + data.append(-3); + data.append(0); data.append(0); + data.append(-2); + data.append(-1); data.append(-1); data.append(-2); - data.append(1); + data.append(0); + data.append(-3); data.append(0); data.append(2); - data.append(-2); + data.append(0); data.append(1); - data.append(-2); - data.append(-3); data.append(2); + data.append(-3); data.append(0); data.append(-1); data.append(0); - data.append(0); + data.append(-1); data.append(-2); - data.append(2); data.append(-1); - data.append(2); - data.append(0); - data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/output_0.cairo b/tests/nodes/less_i8/output_0.cairo index 9398cc8d3..0f911f707 100644 --- 
a/tests/nodes/less_i8/output_0.cairo +++ b/tests/nodes/less_i8/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast.cairo b/tests/nodes/less_i8_broadcast.cairo index fb705a81d..d100935c3 100644 --- a/tests/nodes/less_i8_broadcast.cairo +++ b/tests/nodes/less_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I8TensorPartialEq; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I8TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i8_broadcast/input_0.cairo b/tests/nodes/less_i8_broadcast/input_0.cairo index eac6c02fd..19ab58b2d 100644 --- a/tests/nodes/less_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); data.append(-1); data.append(-3); data.append(0); data.append(2); + data.append(0); data.append(-3); data.append(1); - data.append(1); data.append(2); - data.append(-3); + data.append(0); data.append(2); data.append(-3); - data.append(2); + data.append(-1); + data.append(1); + data.append(-2); + data.append(-3); + data.append(-2); data.append(1); - data.append(0); data.append(-1); + data.append(2); data.append(0); data.append(-1); - data.append(1); + data.append(-2); data.append(0); + data.append(1); 
data.append(-2); - data.append(2); + data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/input_1.cairo b/tests/nodes/less_i8_broadcast/input_1.cairo index d593d06e4..88ed8e242 100644 --- a/tests/nodes/less_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(-2); + data.append(1); data.append(-3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/output_0.cairo b/tests/nodes/less_i8_broadcast/output_0.cairo index f68e15ea8..ef4e9369d 100644 --- a/tests/nodes/less_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_i8_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - 
data.append(0); - data.append(0); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32.cairo b/tests/nodes/less_u32.cairo index 412895527..b6546f1a2 100644 --- a/tests/nodes/less_u32.cairo +++ b/tests/nodes/less_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_u32/input_0.cairo b/tests/nodes/less_u32/input_0.cairo index 15b2924f3..89a0d8072 100644 --- a/tests/nodes/less_u32/input_0.cairo +++ b/tests/nodes/less_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, 
U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(3); + data.append(3); + data.append(1); + data.append(3); + data.append(1); data.append(0); - data.append(2); data.append(0); - data.append(4); - data.append(5); + data.append(1); data.append(0); + data.append(3); + data.append(2); data.append(4); data.append(4); - data.append(1); data.append(5); data.append(3); - data.append(3); - data.append(2); - data.append(5); - data.append(5); - data.append(4); data.append(1); data.append(1); - data.append(4); - data.append(4); - data.append(2); - data.append(2); + data.append(5); + data.append(5); + data.append(0); + data.append(3); data.append(2); - data.append(4); - data.append(4); data.append(3); - data.append(5); + data.append(1); + data.append(3); + data.append(2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/input_1.cairo b/tests/nodes/less_u32/input_1.cairo index e540f7f40..528c5927d 100644 --- a/tests/nodes/less_u32/input_1.cairo +++ b/tests/nodes/less_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(5); - data.append(5); + data.append(3); + data.append(4); data.append(2); - data.append(1); - data.append(1); data.append(2); - data.append(1); - data.append(3); - data.append(3); - data.append(0); data.append(5); - data.append(2); - data.append(0); - data.append(0); + data.append(4); 
+ data.append(5); + data.append(4); data.append(0); data.append(4); - data.append(1); - data.append(1); + data.append(5); data.append(5); data.append(4); + data.append(0); data.append(4); data.append(2); + data.append(4); + data.append(5); + data.append(5); data.append(3); - data.append(2); + data.append(4); + data.append(5); + data.append(4); + data.append(5); + data.append(4); data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/output_0.cairo b/tests/nodes/less_u32/output_0.cairo index 75a278131..87600c456 100644 --- a/tests/nodes/less_u32/output_0.cairo +++ b/tests/nodes/less_u32/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + 
data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast.cairo b/tests/nodes/less_u32_broadcast.cairo index 9a7ac7a22..d394bea89 100644 --- a/tests/nodes/less_u32_broadcast.cairo +++ b/tests/nodes/less_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::BoolTensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_u32_broadcast/input_0.cairo b/tests/nodes/less_u32_broadcast/input_0.cairo index 655814fc8..a740ac39b 100644 --- a/tests/nodes/less_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(5); - data.append(5); - data.append(3); - data.append(4); + data.append(1); data.append(4); + data.append(2); data.append(5); - data.append(3); - 
data.append(0); data.append(5); + data.append(0); data.append(3); data.append(3); data.append(0); - data.append(4); data.append(1); - data.append(5); - data.append(0); data.append(2); + data.append(4); + data.append(0); data.append(2); data.append(1); - data.append(3); - data.append(5); - data.append(5); + data.append(1); + data.append(2); data.append(5); + data.append(2); + data.append(3); data.append(4); - data.append(5); - data.append(5); + data.append(1); + data.append(3); + data.append(1); data.append(2); + data.append(5); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/input_1.cairo b/tests/nodes/less_u32_broadcast/input_1.cairo index bcb20d101..f05f4343a 100644 --- a/tests/nodes/less_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(3); - data.append(0); data.append(0); + data.append(5); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/output_0.cairo b/tests/nodes/less_u32_broadcast/output_0.cairo index ad7acc0af..a9ade1a52 100644 --- a/tests/nodes/less_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_u32_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut 
data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } From 306efaf4c9594fd7203221102da7bc20d04edfa5 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 15:13:17 +0100 Subject: [PATCH 23/68] refactor conditional ops --- src/operators/tensor/core.cairo | 28 ++++++------- .../tensor/implementations/tensor_bool.cairo | 10 ++--- .../implementations/tensor_complex64.cairo | 10 ++--- .../implementations/tensor_fp16x16.cairo | 10 ++--- .../implementations/tensor_fp16x16wide.cairo | 10 ++--- .../implementations/tensor_fp32x32.cairo | 10 ++--- .../implementations/tensor_fp64x64.cairo | 10 ++--- .../implementations/tensor_fp8x23.cairo | 10 ++--- .../implementations/tensor_fp8x23wide.cairo | 10 ++--- .../tensor/implementations/tensor_i32.cairo | 10 ++--- .../tensor/implementations/tensor_i8.cairo | 10 ++--- .../tensor/implementations/tensor_u32.cairo | 10 ++--- src/operators/tensor/math/and.cairo | 
14 +++++-- src/operators/tensor/math/equal.cairo | 10 ++--- src/operators/tensor/math/is_inf.cairo | 40 ++++++++++++++----- src/operators/tensor/math/is_nan.cairo | 14 +++++-- src/operators/tensor/math/less.cairo | 10 ++--- tests/lib.cairo | 12 +++--- 18 files changed, 134 insertions(+), 104 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index e3dbc6e06..1a08641f3 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1085,7 +1085,7 @@ trait TensorTrait { /// #tensor.equal /// /// ```rust - /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if two tensors are equal element-wise. @@ -1150,7 +1150,7 @@ trait TensorTrait { /// >>> [true,true,true,false,false,false,false,false,false] /// ``` /// - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.greater /// /// ```rust @@ -1344,7 +1344,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1357,7 +1357,7 @@ trait TensorTrait { /// >>> [false,false,false,false,false,false,false,true,true] /// ``` /// - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less_equal /// /// ```rust @@ -3465,7 +3465,7 @@ trait TensorTrait { /// #tensor.and /// /// ```rust - /// fn and(self: @Tensor, other: @Tensor) -> Tensor; + /// fn and(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical AND of two tensors element-wise. 
@@ -3493,7 +3493,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; /// - /// fn and_example() -> Tensor { + /// fn and_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(), /// ); @@ -3507,7 +3507,7 @@ trait TensorTrait { /// >>> [false, false, false, false, false, true, false, false, false, false, false, true] /// ``` /// - fn and(self: @Tensor, other: @Tensor) -> Tensor; + fn and(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.where /// /// ```rust @@ -4677,7 +4677,7 @@ trait TensorTrait { /// ## tensor.is_inf /// /// ```rust - /// fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; + /// fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; /// ``` /// /// Maps infinity to true and other values to false. @@ -4699,7 +4699,7 @@ trait TensorTrait { /// use core::array::{ArrayTrait, SpanTrait}; /// use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, U32Tensor}; /// - /// fn is_inf_example() -> Tensor { + /// fn is_inf_example() -> Tensor { /// let tensor = TensorTrait::::new( /// shape: array![6].span(), data: array![1, 0, NumberTrait::INF(), 8, NumberTrait::INF(), NumberTrait::INF()].span(), /// ); @@ -4711,11 +4711,11 @@ trait TensorTrait { /// fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor; + ) -> Tensor; /// ## tensor.is_nan /// /// ```rust - /// fn is_nan(self: @Tensor) -> Tensor; + /// fn is_nan(self: @Tensor) -> Tensor; /// ``` /// /// Maps NaN to true and other values to false. 
@@ -4735,7 +4735,7 @@ trait TensorTrait { /// use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, FP8x23Tensor}; /// use orion::numbers::{FixedTrait, FP8x23}; /// - /// fn is_nan_example() -> Tensor { + /// fn is_nan_example() -> Tensor { /// let mut shape = ArrayTrait::::new(); /// shape.append(4); /// @@ -4751,7 +4751,7 @@ trait TensorTrait { /// >>> [false, false, true, false] /// ``` /// - fn is_nan(self: @Tensor) -> Tensor; + fn is_nan(self: @Tensor) -> Tensor; /// #tensor.not /// /// ```rust @@ -4777,7 +4777,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; /// - /// fn not_example() -> Tensor { + /// fn not_example() -> Tensor { /// let tensor = TensorTrait::new( /// shape: array![3].span(), /// data: array![ diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 110cbb20e..bc7b53569 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -109,7 +109,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -121,7 +121,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -259,7 +259,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -419,11 +419,11 @@ impl BoolTensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { panic(array!['not supported!']) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: 
@Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 6870de2e2..295138988 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -128,7 +128,7 @@ impl Complex64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -140,7 +140,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -347,7 +347,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -456,11 +456,11 @@ impl Complex64Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { panic(array!['not supported!']) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 6a22c9a6b..50ba750b0 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -122,7 +122,7 @@ impl FP16x16Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -134,7 +134,7 @@ impl FP16x16Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, 
other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -385,7 +385,7 @@ impl FP16x16Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -487,11 +487,11 @@ impl FP16x16Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index ddca96466..434ac8fc6 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -132,7 +132,7 @@ impl FP16x16WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -144,7 +144,7 @@ impl FP16x16WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -351,7 +351,7 @@ impl FP16x16WTensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -457,11 +457,11 @@ impl FP16x16WTensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { 
math::is_nan::is_nan(self) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 72d629224..05abc897e 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -120,7 +120,7 @@ impl FP32x32Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -132,7 +132,7 @@ impl FP32x32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -383,7 +383,7 @@ impl FP32x32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -485,11 +485,11 @@ impl FP32x32Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 8d0b90f5e..e9b730f44 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -119,7 +119,7 @@ impl FP64x64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -131,7 +131,7 @@ impl FP64x64Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, 
other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -382,7 +382,7 @@ impl FP64x64Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -484,11 +484,11 @@ impl FP64x64Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index c319bb466..f6d3d40b0 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -121,7 +121,7 @@ impl FP8x23Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -133,7 +133,7 @@ impl FP8x23Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -384,7 +384,7 @@ impl FP8x23Tensor of TensorTrait { core_ops::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -484,11 +484,11 @@ impl FP8x23Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } diff --git 
a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index c1b805677..f1982bf66 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -124,7 +124,7 @@ impl FP8x23WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -136,7 +136,7 @@ impl FP8x23WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -338,7 +338,7 @@ impl FP8x23WTensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -441,11 +441,11 @@ impl FP8x23WTensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 42b7bb4db..00c1f90c5 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -117,7 +117,7 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -129,7 +129,7 @@ impl I32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: 
@Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -372,7 +372,7 @@ impl I32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -472,11 +472,11 @@ impl I32Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 53f6424b0..fc8eb6c6f 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -115,7 +115,7 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -127,7 +127,7 @@ impl I8Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -376,7 +376,7 @@ impl I8Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -476,11 +476,11 @@ impl I8Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo 
b/src/operators/tensor/implementations/tensor_u32.cairo index f9af20b65..12b03fb77 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -114,7 +114,7 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } @@ -126,7 +126,7 @@ impl U32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } @@ -320,7 +320,7 @@ impl U32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -420,11 +420,11 @@ impl U32Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/math/and.cairo b/src/operators/tensor/math/and.cairo index 0b1369f35..c55efce4f 100644 --- a/src/operators/tensor/math/and.cairo +++ b/src/operators/tensor/math/and.cairo @@ -1,13 +1,13 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor, U32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; /// Cf: TensorTrait::and docstring -fn and(y: @Tensor, z: @Tensor) -> Tensor { +fn and(y: @Tensor, z: @Tensor) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut 
result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -18,7 +18,13 @@ fn and(y: @Tensor, z: @Tensor) -> Tensor { let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); - result.append(*(*y.data)[indices_self] && *(*z.data)[indices_other]); + let r = if *(*y.data)[indices_self] && *(*z.data)[indices_other] { + 1 + } else { + 0 + }; + + result.append(r); n += 1; }; diff --git a/src/operators/tensor/math/equal.cairo b/src/operators/tensor/math/equal.cairo index 7b2a1cb6c..6ee8e83bd 100644 --- a/src/operators/tensor/math/equal.cairo +++ b/src/operators/tensor/math/equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, U32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -12,9 +12,9 @@ fn equal< impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -26,9 +26,9 @@ fn equal< let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); if *(*y.data)[indices_self] == *(*z.data)[indices_other] { - result.append(true); + result.append(1); } else { - result.append(false); + result.append(0); } n += 1; diff --git a/src/operators/tensor/math/is_inf.cairo b/src/operators/tensor/math/is_inf.cairo index 021b10732..147d60870 100644 --- a/src/operators/tensor/math/is_inf.cairo +++ b/src/operators/tensor/math/is_inf.cairo @@ -1,6 +1,6 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; -use orion::operators::tensor::implementations::tensor_bool::BoolTensor; 
+use orion::operators::tensor::U32Tensor; /// Cf: TensorTrait::is_inf docstring fn is_inf< @@ -12,7 +12,7 @@ fn is_inf< impl TDrop: Drop >( x: @Tensor, detect_negative: Option, detect_positive: Option -) -> Tensor { +) -> Tensor { let neg_opt = match detect_negative { Option::Some(val) => { if val == 0 { 0 @@ -32,7 +32,7 @@ fn is_inf< }; if neg_opt == 0 && pos_opt == 0 { - return TensorTrait::new(*x.shape, ArrayTrait::::new().span()); + return TensorTrait::new(*x.shape, ArrayTrait::::new().span()); } if neg_opt == 0 && pos_opt == 1 { @@ -43,11 +43,17 @@ fn is_inf< return is_neg_inf(x); } - let mut data_result: Array = array![]; + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { data_result.append((*item).is_inf()); }, + Option::Some(item) => { + if (*item).is_inf() { + data_result.append(1); + } else { + data_result.append(0); + } + }, Option::None => { break; } }; }; @@ -65,12 +71,18 @@ fn is_pos_inf< impl TDrop: Drop >( x: @Tensor -) -> Tensor { - let mut data_result: Array = array![]; +) -> Tensor { + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { data_result.append((*item).is_pos_inf()); }, + Option::Some(item) => { + if (*item).is_pos_inf() { + data_result.append(1); + } else { + data_result.append(0); + } + }, Option::None => { break; } }; }; @@ -88,12 +100,18 @@ fn is_neg_inf< impl TDrop: Drop >( x: @Tensor -) -> Tensor { - let mut data_result: Array = array![]; +) -> Tensor { + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { data_result.append((*item).is_neg_inf()); }, + Option::Some(item) => { + if (*item).is_neg_inf() { + data_result.append(1); + } else { + data_result.append(0); + } + }, Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/is_nan.cairo b/src/operators/tensor/math/is_nan.cairo index 2f1818a81..774c29d5b 
100644 --- a/src/operators/tensor/math/is_nan.cairo +++ b/src/operators/tensor/math/is_nan.cairo @@ -1,6 +1,6 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; -use orion::operators::tensor::implementations::tensor_bool::BoolTensor; +use orion::operators::tensor::U32Tensor; /// Cf: TensorTrait::is_nan docstring fn is_nan< @@ -12,12 +12,18 @@ fn is_nan< impl TDrop: Drop >( x: @Tensor -) -> Tensor { - let mut data_result: Array = array![]; +) -> Tensor { + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { data_result.append((*item).is_nan()); }, + Option::Some(item) => { + if (*item).is_nan() { + data_result.append(1); + } else { + data_result.append(0); + } + }, Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo index bd28f64ac..4afddc3fb 100644 --- a/src/operators/tensor/math/less.cairo +++ b/src/operators/tensor/math/less.cairo @@ -6,15 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less docstring fn less< T, - impl BoolTensor: TensorTrait, + impl U32Tensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -26,9 +26,9 @@ fn less< let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); if *(*y.data)[indices_self] < *(*z.data)[indices_other] { - result.append(true); + result.append(1); } else { - result.append(false); + result.append(0); } n += 1; diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..eb58139db 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -mod numbers; -mod performance; -mod tensor_core; -mod nodes; -mod ml; -mod 
operators; +// mod numbers; +// mod performance; +// mod tensor_core; +// mod nodes; +// mod ml; +// mod operators; From fc3e40cfd4633a9b6ad94cbb7563479ffaad41eb Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 17:09:53 +0100 Subject: [PATCH 24/68] test ops --- nodegen/node/and.py | 4 +- nodegen/node/equal.py | 20 +-- nodegen/node/is_inf.py | 138 +----------------- nodegen/node/is_nan.py | 19 +-- nodegen/node/less.py | 20 +-- nodegen/node/not.py | 15 +- tests/lib.cairo | 12 +- tests/nodes.cairo | 20 +-- tests/nodes/and_bool.cairo | 12 +- tests/nodes/and_bool/input_0.cairo | 10 +- tests/nodes/and_bool/input_1.cairo | 8 +- tests/nodes/and_bool/output_0.cairo | 29 ++-- tests/nodes/and_bool_broadcast.cairo | 12 +- tests/nodes/and_bool_broadcast/input_0.cairo | 28 ++-- tests/nodes/and_bool_broadcast/input_1.cairo | 24 +-- tests/nodes/and_bool_broadcast/output_0.cairo | 125 ++++++++-------- tests/nodes/equal_fp16x16.cairo | 8 +- tests/nodes/equal_fp16x16/input_0.cairo | 30 ++-- tests/nodes/equal_fp16x16/input_1.cairo | 28 ++-- tests/nodes/equal_fp16x16/output_0.cairo | 59 ++++---- tests/nodes/equal_fp16x16_broadcast.cairo | 8 +- .../equal_fp16x16_broadcast/input_0.cairo | 6 +- .../equal_fp16x16_broadcast/input_1.cairo | 2 +- .../equal_fp16x16_broadcast/output_0.cairo | 13 +- tests/nodes/equal_fp8x23.cairo | 8 +- tests/nodes/equal_fp8x23/input_0.cairo | 24 +-- tests/nodes/equal_fp8x23/input_1.cairo | 30 ++-- tests/nodes/equal_fp8x23/output_0.cairo | 59 ++++---- tests/nodes/equal_fp8x23_broadcast.cairo | 8 +- .../equal_fp8x23_broadcast/input_0.cairo | 4 +- .../equal_fp8x23_broadcast/input_1.cairo | 4 +- .../equal_fp8x23_broadcast/output_0.cairo | 13 +- tests/nodes/equal_i32.cairo | 4 +- tests/nodes/equal_i32/input_0.cairo | 22 +-- tests/nodes/equal_i32/input_1.cairo | 30 ++-- tests/nodes/equal_i32/output_0.cairo | 59 ++++---- tests/nodes/equal_i32_broadcast.cairo | 4 +- tests/nodes/equal_i32_broadcast/input_0.cairo | 6 +- 
tests/nodes/equal_i32_broadcast/input_1.cairo | 4 +- .../nodes/equal_i32_broadcast/output_0.cairo | 13 +- tests/nodes/equal_i8.cairo | 6 +- tests/nodes/equal_i8/input_0.cairo | 24 +-- tests/nodes/equal_i8/input_1.cairo | 24 +-- tests/nodes/equal_i8/output_0.cairo | 59 ++++---- tests/nodes/equal_i8_broadcast.cairo | 6 +- tests/nodes/equal_i8_broadcast/input_0.cairo | 4 +- tests/nodes/equal_i8_broadcast/input_1.cairo | 2 +- tests/nodes/equal_i8_broadcast/output_0.cairo | 13 +- tests/nodes/equal_u32.cairo | 4 +- tests/nodes/equal_u32/input_0.cairo | 30 ++-- tests/nodes/equal_u32/input_1.cairo | 28 ++-- tests/nodes/equal_u32/output_0.cairo | 59 ++++---- tests/nodes/equal_u32_broadcast.cairo | 4 +- tests/nodes/equal_u32_broadcast/input_0.cairo | 6 +- tests/nodes/equal_u32_broadcast/input_1.cairo | 2 +- .../nodes/equal_u32_broadcast/output_0.cairo | 13 +- tests/nodes/is_inf_fp16x16.cairo | 22 --- tests/nodes/is_inf_fp16x16/input_0.cairo | 18 --- tests/nodes/is_inf_fp16x16/output_0.cairo | 17 --- tests/nodes/is_inf_fp8x23.cairo | 22 --- tests/nodes/is_inf_fp8x23/input_0.cairo | 18 --- tests/nodes/is_inf_fp8x23/output_0.cairo | 17 --- tests/nodes/is_inf_i32.cairo | 14 +- tests/nodes/is_inf_i32/input_0.cairo | 8 +- tests/nodes/is_inf_i32/output_0.cairo | 17 ++- tests/nodes/is_inf_i8.cairo | 22 --- tests/nodes/is_inf_i8/input_0.cairo | 18 --- tests/nodes/is_inf_i8/output_0.cairo | 17 --- tests/nodes/is_inf_u32.cairo | 22 --- tests/nodes/is_inf_u32/input_0.cairo | 18 --- tests/nodes/is_inf_u32/output_0.cairo | 17 --- tests/nodes/is_nan_fp16x16.cairo | 14 +- tests/nodes/is_nan_fp16x16/input_0.cairo | 8 +- tests/nodes/is_nan_fp16x16/output_0.cairo | 17 ++- tests/nodes/is_nan_fp8x23.cairo | 22 --- tests/nodes/is_neg_inf_fp16x16.cairo | 22 --- tests/nodes/is_neg_inf_fp16x16/input_0.cairo | 18 --- tests/nodes/is_neg_inf_fp16x16/output_0.cairo | 17 --- tests/nodes/is_neg_inf_fp8x23.cairo | 22 --- tests/nodes/is_neg_inf_fp8x23/input_0.cairo | 18 --- 
tests/nodes/is_neg_inf_fp8x23/output_0.cairo | 17 --- tests/nodes/is_neg_inf_i32.cairo | 14 +- tests/nodes/is_neg_inf_i32/input_0.cairo | 8 +- tests/nodes/is_neg_inf_i32/output_0.cairo | 17 ++- tests/nodes/is_neg_inf_i8.cairo | 22 --- tests/nodes/is_neg_inf_i8/input_0.cairo | 18 --- tests/nodes/is_neg_inf_i8/output_0.cairo | 17 --- tests/nodes/is_pos_inf_fp16x16.cairo | 22 --- tests/nodes/is_pos_inf_fp16x16/input_0.cairo | 18 --- tests/nodes/is_pos_inf_fp16x16/output_0.cairo | 17 --- tests/nodes/is_pos_inf_fp8x23.cairo | 22 --- tests/nodes/is_pos_inf_fp8x23/input_0.cairo | 18 --- tests/nodes/is_pos_inf_fp8x23/output_0.cairo | 17 --- tests/nodes/is_pos_inf_i32.cairo | 14 +- tests/nodes/is_pos_inf_i32/input_0.cairo | 8 +- tests/nodes/is_pos_inf_i32/output_0.cairo | 17 ++- tests/nodes/is_pos_inf_i8.cairo | 22 --- tests/nodes/is_pos_inf_i8/input_0.cairo | 18 --- tests/nodes/is_pos_inf_i8/output_0.cairo | 17 --- tests/nodes/less_fp16x16.cairo | 8 +- tests/nodes/less_fp16x16/input_0.cairo | 24 +-- tests/nodes/less_fp16x16/input_1.cairo | 30 ++-- tests/nodes/less_fp16x16/output_0.cairo | 59 ++++---- tests/nodes/less_fp16x16_broadcast.cairo | 8 +- .../less_fp16x16_broadcast/input_0.cairo | 22 +-- .../less_fp16x16_broadcast/input_1.cairo | 4 +- .../less_fp16x16_broadcast/output_0.cairo | 59 ++++---- tests/nodes/less_fp8x23.cairo | 8 +- tests/nodes/less_fp8x23/input_0.cairo | 24 +-- tests/nodes/less_fp8x23/input_1.cairo | 24 +-- tests/nodes/less_fp8x23/output_0.cairo | 59 ++++---- tests/nodes/less_fp8x23_broadcast.cairo | 8 +- .../nodes/less_fp8x23_broadcast/input_0.cairo | 28 ++-- .../nodes/less_fp8x23_broadcast/input_1.cairo | 4 +- .../less_fp8x23_broadcast/output_0.cairo | 59 ++++---- tests/nodes/less_i32.cairo | 8 +- tests/nodes/less_i32/input_0.cairo | 26 ++-- tests/nodes/less_i32/input_1.cairo | 26 ++-- tests/nodes/less_i32/output_0.cairo | 59 ++++---- tests/nodes/less_i32_broadcast.cairo | 8 +- tests/nodes/less_i32_broadcast/input_0.cairo | 28 ++-- 
tests/nodes/less_i32_broadcast/input_1.cairo | 6 +- tests/nodes/less_i32_broadcast/output_0.cairo | 59 ++++---- tests/nodes/less_i8.cairo | 8 +- tests/nodes/less_i8/input_0.cairo | 28 ++-- tests/nodes/less_i8/input_1.cairo | 26 ++-- tests/nodes/less_i8/output_0.cairo | 59 ++++---- tests/nodes/less_i8_broadcast.cairo | 8 +- tests/nodes/less_i8_broadcast/input_0.cairo | 26 ++-- tests/nodes/less_i8_broadcast/input_1.cairo | 6 +- tests/nodes/less_i8_broadcast/output_0.cairo | 59 ++++---- tests/nodes/less_u32.cairo | 6 +- tests/nodes/less_u32/input_0.cairo | 24 +-- tests/nodes/less_u32/input_1.cairo | 28 ++-- tests/nodes/less_u32/output_0.cairo | 59 ++++---- tests/nodes/less_u32_broadcast.cairo | 6 +- tests/nodes/less_u32_broadcast/input_0.cairo | 28 ++-- tests/nodes/less_u32_broadcast/input_1.cairo | 4 +- tests/nodes/less_u32_broadcast/output_0.cairo | 59 ++++---- tests/nodes/not_bool.cairo | 10 +- tests/nodes/not_bool/input_0.cairo | 2 +- tests/nodes/not_bool/output_0.cairo | 2 +- 142 files changed, 1176 insertions(+), 1894 deletions(-) delete mode 100644 tests/nodes/is_inf_fp16x16.cairo delete mode 100644 tests/nodes/is_inf_fp16x16/input_0.cairo delete mode 100644 tests/nodes/is_inf_fp16x16/output_0.cairo delete mode 100644 tests/nodes/is_inf_fp8x23.cairo delete mode 100644 tests/nodes/is_inf_fp8x23/input_0.cairo delete mode 100644 tests/nodes/is_inf_fp8x23/output_0.cairo delete mode 100644 tests/nodes/is_inf_i8.cairo delete mode 100644 tests/nodes/is_inf_i8/input_0.cairo delete mode 100644 tests/nodes/is_inf_i8/output_0.cairo delete mode 100644 tests/nodes/is_inf_u32.cairo delete mode 100644 tests/nodes/is_inf_u32/input_0.cairo delete mode 100644 tests/nodes/is_inf_u32/output_0.cairo delete mode 100644 tests/nodes/is_nan_fp8x23.cairo delete mode 100644 tests/nodes/is_neg_inf_fp16x16.cairo delete mode 100644 tests/nodes/is_neg_inf_fp16x16/input_0.cairo delete mode 100644 tests/nodes/is_neg_inf_fp16x16/output_0.cairo delete mode 100644 
tests/nodes/is_neg_inf_fp8x23.cairo delete mode 100644 tests/nodes/is_neg_inf_fp8x23/input_0.cairo delete mode 100644 tests/nodes/is_neg_inf_fp8x23/output_0.cairo delete mode 100644 tests/nodes/is_neg_inf_i8.cairo delete mode 100644 tests/nodes/is_neg_inf_i8/input_0.cairo delete mode 100644 tests/nodes/is_neg_inf_i8/output_0.cairo delete mode 100644 tests/nodes/is_pos_inf_fp16x16.cairo delete mode 100644 tests/nodes/is_pos_inf_fp16x16/input_0.cairo delete mode 100644 tests/nodes/is_pos_inf_fp16x16/output_0.cairo delete mode 100644 tests/nodes/is_pos_inf_fp8x23.cairo delete mode 100644 tests/nodes/is_pos_inf_fp8x23/input_0.cairo delete mode 100644 tests/nodes/is_pos_inf_fp8x23/output_0.cairo delete mode 100644 tests/nodes/is_pos_inf_i8.cairo delete mode 100644 tests/nodes/is_pos_inf_i8/input_0.cairo delete mode 100644 tests/nodes/is_pos_inf_i8/output_0.cairo diff --git a/nodegen/node/and.py b/nodegen/node/and.py index 975f4580f..f8885702f 100644 --- a/nodegen/node/and.py +++ b/nodegen/node/and.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.BOOL, x.shape, x.flatten()) y = Tensor(Dtype.BOOL, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "and_bool" make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.BOOL, x.shape, x.flatten()) y = Tensor(Dtype.BOOL, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "and_bool_broadcast" make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name) diff --git a/nodegen/node/equal.py b/nodegen/node/equal.py index 9474d3eca..f995ae999 100644 --- a/nodegen/node/equal.py +++ b/nodegen/node/equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = 
"equal_u32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_u32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i8" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i8_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp8x23" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = 
Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp8x23_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp16x16" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp16x16_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) diff --git a/nodegen/node/is_inf.py b/nodegen/node/is_inf.py index ef67a216c..9f72e315c 100644 --- a/nodegen/node/is_inf.py +++ b/nodegen/node/is_inf.py @@ -6,162 +6,38 @@ class Is_inf(RunAll): - @staticmethod - def is_inf_u32(): - def default(): - input_0 = np.array([1, 0, INF, 8, -INF, INF], dtype=np.uint32) - output = np.array([False, False, True, False, True, True], dtype=bool) - - input_0 = Tensor(Dtype.U32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_inf_u32" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) - - default() - @staticmethod def is_inf_i32(): def default(): input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([False, False, True, False, True, True], dtype=bool) + output = np.array([0, 0, 1, 0, 1, 1], dtype=np.uint32) input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + output = Tensor(Dtype.U32, output.shape, output.flatten()) name = "is_inf_i32" make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", 
name) def positive(): input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([False, False, True, False, False, True], dtype=bool) + output = np.array([0, 0, 1, 0, 0, 1], dtype=np.uint32) input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + output = Tensor(Dtype.U32, output.shape, output.flatten()) name = "is_pos_inf_i32" make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) def negative(): input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([False, False, False, False, True, False], dtype=bool) + output = np.array([0, 0, 0, 0, 1, 0], dtype=np.uint32) input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + output = Tensor(Dtype.U32, output.shape, output.flatten()) name = "is_neg_inf_i32" make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) default() positive() - negative() - - @staticmethod - def is_inf_i8(): - def default(): - input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int8) - output = np.array([False, False, True, False, True, True], dtype=bool) - - input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_inf_i8" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) - - def positive(): - input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([False, False, True, False, False, True], dtype=bool) - - input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_pos_inf_i8" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) - - def negative(): - input_0 = np.array([-1, 0, INF, 8, 
-INF, INF], dtype=np.int32) - output = np.array([False, False, False, False, True, False], dtype=bool) - - input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_neg_inf_i8" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) - - default() - positive() - negative() - - @staticmethod - def is_inf_fp8x23(): - def default(): - input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) - output = np.array([False, False, True, False, True, True], dtype=bool) - - input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp( - input_0.flatten(), FixedImpl.FP8x23)) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_inf_fp8x23" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) - - def positive(): - input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) - output = np.array([False, False, True, False, False, True], dtype=bool) - - input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_pos_inf_fp8x23" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) - - def negative(): - input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) - output = np.array([False, False, False, False, True, False], dtype=bool) - - input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_neg_inf_fp8x23" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) - - default() - positive() - negative() - - @staticmethod - def is_inf_fp16x16(): - def default(): - input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) - output = np.array([False, False, True, False, True, True], dtype=bool) - - 
input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp( - input_0.flatten(), FixedImpl.FP16x16)) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_inf_fp16x16" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) - - def positive(): - input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) - output = np.array([False, False, True, False, False, True], dtype=bool) - - input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_pos_inf_fp16x16" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) - - def negative(): - input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) - output = np.array([False, False, False, False, True, False], dtype=bool) - - input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_neg_inf_fp16x16" - make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) - - default() - positive() - negative() + negative() \ No newline at end of file diff --git a/nodegen/node/is_nan.py b/nodegen/node/is_nan.py index 05f7ab5df..469e145ea 100644 --- a/nodegen/node/is_nan.py +++ b/nodegen/node/is_nan.py @@ -8,30 +8,15 @@ class Is_nan(RunAll): - @staticmethod - def is_nan_fp8x23(): - def default(): - input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64) - output = np.array([False, False, True, False, True, True], dtype=bool) - - input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp( - input_0.flatten(), FixedImpl.FP8x23)) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) - - name = "is_nan_fp8x23" - make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name) - - default() - @staticmethod def is_nan_fp16x16(): def default(): input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], 
dtype=np.float64) - output = np.array([False, False, True, False, True, True], dtype=bool) + output = np.array([0, 0, 1, 0, 1, 1]) input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp( input_0.flatten(), FixedImpl.FP16x16)) - output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + output = Tensor(Dtype.U32, output.shape, output.flatten()) name = "is_nan_fp16x16" make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name) diff --git a/nodegen/node/less.py b/nodegen/node/less.py index 14af93201..20b39263d 100644 --- a/nodegen/node/less.py +++ b/nodegen/node/less.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_u32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_u32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i8" make_test([x, y], z, 
"input_0.less(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i8_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp8x23" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp8x23_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp16x16" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp16x16_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) diff --git a/nodegen/node/not.py b/nodegen/node/not.py index 6a0f44d65..ef4cae4fb 100644 --- a/nodegen/node/not.py +++ b/nodegen/node/not.py @@ -1,17 +1,18 @@ import numpy as np from nodegen.node import RunAll -from ..helpers import make_node, make_test, Tensor, Dtype +from ..helpers import make_test, Tensor, Dtype + class Not(RunAll): @staticmethod def not_bool(): x = np.random.uniform(True, False, (1, 
1)).astype(bool) - y = ~(x) - - x = Tensor(Dtype.Bool, x.shape, x.flatten()) - y = Tensor(Dtype.Bool, y.shape, y.flatten()) + y = np.logical_not(x) + x = Tensor(Dtype.BOOL, x.shape, x.flatten()) + y = Tensor(Dtype.BOOL, y.shape, y.flatten()) name = "not_bool" - make_node([x], [y], name) - make_test([x], y, "input_0", name) \ No newline at end of file + make_test([x], y, "input_0.not()", name) + + not_bool() diff --git a/tests/lib.cairo b/tests/lib.cairo index eb58139db..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -// mod numbers; -// mod performance; -// mod tensor_core; -// mod nodes; -// mod ml; -// mod operators; +mod numbers; +mod performance; +mod tensor_core; +mod nodes; +mod ml; +mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index ff888973b..2907ed7aa 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -784,21 +784,10 @@ mod concat_from_sequence_i8_new_axis_default; mod concat_from_sequence_u32_new_axis_zero; mod concat_from_sequence_u32_new_axis_one; mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; +// mod is_nan_fp16x16; +// mod is_inf_i32; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; mod reduce_log_sum_fp8x23_export_do_not_keepdims; mod reduce_log_sum_fp8x23_export_keepdims; mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; @@ -1051,3 +1040,4 @@ mod reduce_sum_keep_dims; mod reduce_sum_no_keep_dims; mod reduce_sum_default_axes_keepdims; mod reduce_sum_empty_axes_input_noop; +mod and_bool_broadcast; diff --git a/tests/nodes/and_bool.cairo b/tests/nodes/and_bool.cairo index 223240abe..d89a0a213 100644 --- a/tests/nodes/and_bool.cairo +++ b/tests/nodes/and_bool.cairo @@ -3,20 +3,22 @@ 
mod input_1; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::BoolTensor; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensorPartialEq; #[test] #[available_gas(2000000000)] fn test_and_bool() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = BoolTensor::and(@input_0, @input_1); + let y_0 = BoolTensor::and(@input_0, @input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/and_bool/input_0.cairo b/tests/nodes/and_bool/input_0.cairo index 881c7e8ea..76cf09e07 100644 --- a/tests/nodes/and_bool/input_0.cairo +++ b/tests/nodes/and_bool/input_0.cairo @@ -11,14 +11,14 @@ fn input_0() -> Tensor { data.append(false); data.append(true); data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); data.append(false); data.append(false); data.append(true); + data.append(false); data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool/input_1.cairo b/tests/nodes/and_bool/input_1.cairo index e26f3717a..96fa7fe95 100644 --- a/tests/nodes/and_bool/input_1.cairo +++ b/tests/nodes/and_bool/input_1.cairo @@ -8,17 +8,17 @@ fn input_1() -> Tensor { shape.append(4); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); data.append(true); data.append(false); data.append(false); data.append(false); + data.append(true); + data.append(true); + data.append(true); data.append(false); 
data.append(true); data.append(true); data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool/output_0.cairo b/tests/nodes/and_bool/output_0.cairo index e961a4093..365e209c0 100644 --- a/tests/nodes/and_bool/output_0.cairo +++ b/tests/nodes/and_bool/output_0.cairo @@ -1,24 +1,25 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(4); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool_broadcast.cairo b/tests/nodes/and_bool_broadcast.cairo index 1ef34c86b..839103063 100644 --- a/tests/nodes/and_bool_broadcast.cairo +++ b/tests/nodes/and_bool_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::BoolTensor; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensorPartialEq; #[test] #[available_gas(2000000000)] fn 
test_and_bool_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = BoolTensor::and(@input_0, @input_1); + let y_0 = BoolTensor::and(@input_0, @input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/and_bool_broadcast/input_0.cairo b/tests/nodes/and_bool_broadcast/input_0.cairo index 56fdd1103..bb1a94372 100644 --- a/tests/nodes/and_bool_broadcast/input_0.cairo +++ b/tests/nodes/and_bool_broadcast/input_0.cairo @@ -9,65 +9,65 @@ fn input_0() -> Tensor { shape.append(5); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(false); - data.append(false); data.append(false); + data.append(true); data.append(false); data.append(false); data.append(true); data.append(false); - data.append(true); data.append(false); + data.append(true); data.append(false); data.append(false); data.append(false); data.append(true); data.append(false); - data.append(false); - data.append(false); data.append(true); data.append(false); + data.append(true); data.append(false); data.append(false); data.append(true); - data.append(false); data.append(true); data.append(true); data.append(false); - data.append(false); - data.append(false); data.append(true); data.append(true); data.append(true); + data.append(true); + data.append(true); + data.append(false); data.append(false); data.append(false); data.append(false); data.append(false); + data.append(true); + data.append(false); data.append(false); data.append(false); data.append(false); data.append(false); data.append(true); data.append(false); + data.append(false); + data.append(false); data.append(true); data.append(false); data.append(false); data.append(true); data.append(true); - data.append(false); + data.append(true); data.append(true); data.append(false); + data.append(true); data.append(false); - data.append(false); + data.append(true); data.append(false); data.append(true); 
data.append(true); data.append(false); + data.append(false); data.append(true); data.append(true); - data.append(true); - data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool_broadcast/input_1.cairo b/tests/nodes/and_bool_broadcast/input_1.cairo index 8da43cc44..3dfd5a03e 100644 --- a/tests/nodes/and_bool_broadcast/input_1.cairo +++ b/tests/nodes/and_bool_broadcast/input_1.cairo @@ -9,43 +9,36 @@ fn input_1() -> Tensor { shape.append(5); let mut data = ArrayTrait::new(); + data.append(false); data.append(true); data.append(true); data.append(true); data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); data.append(false); data.append(false); - data.append(true); data.append(false); data.append(true); data.append(false); - data.append(true); - data.append(true); data.append(false); data.append(false); data.append(true); - data.append(false); - data.append(false); data.append(true); data.append(false); + data.append(true); data.append(false); data.append(false); data.append(true); data.append(true); data.append(true); - data.append(true); - data.append(false); data.append(false); data.append(true); data.append(true); data.append(true); data.append(true); + data.append(true); data.append(false); + data.append(false); + data.append(true); data.append(true); data.append(false); data.append(false); @@ -54,20 +47,27 @@ fn input_1() -> Tensor { data.append(true); data.append(true); data.append(true); + data.append(false); data.append(true); data.append(false); data.append(true); data.append(false); + data.append(false); data.append(true); + data.append(false); data.append(true); data.append(true); data.append(false); data.append(false); data.append(true); data.append(false); + data.append(true); + data.append(true); data.append(false); data.append(false); data.append(true); data.append(false); + data.append(false); + data.append(true); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool_broadcast/output_0.cairo b/tests/nodes/and_bool_broadcast/output_0.cairo index e12ed574d..0b1b699f0 100644 --- a/tests/nodes/and_bool_broadcast/output_0.cairo +++ b/tests/nodes/and_bool_broadcast/output_0.cairo @@ -1,73 +1,74 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(4); shape.append(5); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - 
data.append(true); - data.append(false); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16.cairo b/tests/nodes/equal_fp16x16.cairo index a71efdeda..65c0a784f 100644 --- a/tests/nodes/equal_fp16x16.cairo +++ b/tests/nodes/equal_fp16x16.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; #[test] 
#[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp16x16/input_0.cairo b/tests/nodes/equal_fp16x16/input_0.cairo index 75bbd1b1a..80fc69258 100644 --- a/tests/nodes/equal_fp16x16/input_0.cairo +++ b/tests/nodes/equal_fp16x16/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 
131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/input_1.cairo b/tests/nodes/equal_fp16x16/input_1.cairo index 76be7abd4..28be847cd 100644 --- a/tests/nodes/equal_fp16x16/input_1.cairo +++ b/tests/nodes/equal_fp16x16/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + 
data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/output_0.cairo b/tests/nodes/equal_fp16x16/output_0.cairo index 5806c9c8f..ac7620e41 100644 --- a/tests/nodes/equal_fp16x16/output_0.cairo +++ b/tests/nodes/equal_fp16x16/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - 
data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast.cairo b/tests/nodes/equal_fp16x16_broadcast.cairo index 1ca98f2ab..b01301226 100644 --- a/tests/nodes/equal_fp16x16_broadcast.cairo +++ b/tests/nodes/equal_fp16x16_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo index 6e630308c..63bb6f2bd 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, 
sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo index b3280a1c0..62fb47cf7 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo index e42f5e1e1..1b2b1839c 100644 --- a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23.cairo b/tests/nodes/equal_fp8x23.cairo index 6a0d2aac1..2cb21620e 100644 --- a/tests/nodes/equal_fp8x23.cairo +++ b/tests/nodes/equal_fp8x23.cairo @@ -3,13 +3,13 @@ 
mod input_1; mod output_0; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp8x23/input_0.cairo b/tests/nodes/equal_fp8x23/input_0.cairo index b13b78476..e4da062fe 100644 --- a/tests/nodes/equal_fp8x23/input_0.cairo +++ b/tests/nodes/equal_fp8x23/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, 
sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/input_1.cairo b/tests/nodes/equal_fp8x23/input_1.cairo index 4178c68b4..dd0be993a 100644 --- a/tests/nodes/equal_fp8x23/input_1.cairo +++ b/tests/nodes/equal_fp8x23/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); 
data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/output_0.cairo b/tests/nodes/equal_fp8x23/output_0.cairo index b38d8f0d7..07372d8e0 100644 --- a/tests/nodes/equal_fp8x23/output_0.cairo +++ b/tests/nodes/equal_fp8x23/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor 
{ let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast.cairo b/tests/nodes/equal_fp8x23_broadcast.cairo index d40ab0cbc..f21904ba0 100644 --- a/tests/nodes/equal_fp8x23_broadcast.cairo +++ b/tests/nodes/equal_fp8x23_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::BoolTensorPartialEq; -use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo index aa6318ba4..b3a74749a 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo index 8c280300a..3d5d07d10 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo index e42f5e1e1..705f7ffc6 100644 --- a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> 
Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32.cairo b/tests/nodes/equal_i32.cairo index e9678d8b0..2f86f4afd 100644 --- a/tests/nodes/equal_i32.cairo +++ b/tests/nodes/equal_i32.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; #[test] diff --git a/tests/nodes/equal_i32/input_0.cairo b/tests/nodes/equal_i32/input_0.cairo index 1ddc0739f..9dcfbae82 100644 --- a/tests/nodes/equal_i32/input_0.cairo +++ b/tests/nodes/equal_i32/input_0.cairo @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(-3); - data.append(-3); - data.append(1); - data.append(-2); - data.append(2); data.append(-2); + data.append(-1); + data.append(-3); + data.append(-1); + data.append(0); data.append(0); data.append(-3); data.append(-3); data.append(0); - data.append(-3); - data.append(-2); - data.append(2); data.append(-2); data.append(-2); data.append(-2); + data.append(-3); + data.append(1); data.append(-1); data.append(0); + data.append(-2); data.append(2); + data.append(-3); data.append(-1); data.append(-2); - data.append(-3); + data.append(-1); + data.append(-2); + data.append(-1); data.append(2); - data.append(0); - data.append(-3); data.append(1); TensorTrait::new(shape.span(), 
data.span()) } diff --git a/tests/nodes/equal_i32/input_1.cairo b/tests/nodes/equal_i32/input_1.cairo index 1721bc693..e1863a501 100644 --- a/tests/nodes/equal_i32/input_1.cairo +++ b/tests/nodes/equal_i32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); + data.append(2); + data.append(0); + data.append(1); data.append(-1); + data.append(2); + data.append(2); data.append(1); data.append(0); - data.append(0); - data.append(2); - data.append(-2); - data.append(-2); data.append(-1); - data.append(0); + data.append(-3); data.append(-1); data.append(-3); - data.append(2); - data.append(-2); data.append(-1); data.append(-3); - data.append(2); - data.append(1); + data.append(-3); + data.append(0); data.append(1); - data.append(-1); - data.append(2); + data.append(-3); + data.append(-3); + data.append(-3); + data.append(-3); + data.append(-3); data.append(1); - data.append(-1); - data.append(-1); - data.append(-2); + data.append(-3); data.append(2); data.append(-2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/output_0.cairo b/tests/nodes/equal_i32/output_0.cairo index 1e3bb5a60..e4fb77e12 100644 --- a/tests/nodes/equal_i32/output_0.cairo +++ b/tests/nodes/equal_i32/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - 
data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast.cairo b/tests/nodes/equal_i32_broadcast.cairo index 8acfb1db1..f21bed4f5 100644 --- a/tests/nodes/equal_i32_broadcast.cairo +++ b/tests/nodes/equal_i32_broadcast.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; #[test] diff --git a/tests/nodes/equal_i32_broadcast/input_0.cairo b/tests/nodes/equal_i32_broadcast/input_0.cairo index ad3cd4116..2e440937c 100644 --- a/tests/nodes/equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/equal_i32_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(-3); - data.append(1); 
data.append(0); + data.append(-2); + data.append(-1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/input_1.cairo b/tests/nodes/equal_i32_broadcast/input_1.cairo index 8b33cf367..d0bca2c7f 100644 --- a/tests/nodes/equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/equal_i32_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); + data.append(0); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/output_0.cairo b/tests/nodes/equal_i32_broadcast/output_0.cairo index 75e094812..ce4201837 100644 --- a/tests/nodes/equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/equal_i32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(true); - data.append(false); + data.append(1); + data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8.cairo b/tests/nodes/equal_i8.cairo index e5c0184fc..3f3d3661d 100644 --- a/tests/nodes/equal_i8.cairo +++ b/tests/nodes/equal_i8.cairo @@ -4,12 +4,12 @@ mod output_0; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; +use 
orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_i8/input_0.cairo b/tests/nodes/equal_i8/input_0.cairo index c89afb29b..281102a79 100644 --- a/tests/nodes/equal_i8/input_0.cairo +++ b/tests/nodes/equal_i8/input_0.cairo @@ -12,30 +12,30 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); data.append(-1); - data.append(-3); - data.append(0); - data.append(0); + data.append(-1); + data.append(1); data.append(-2); data.append(2); - data.append(-1); - data.append(0); data.append(-2); + data.append(-1); data.append(1); + data.append(-3); + data.append(-1); data.append(-2); data.append(1); + data.append(-1); data.append(-3); - data.append(2); - data.append(-3); - data.append(-3); - data.append(2); + data.append(1); data.append(-3); data.append(-3); + data.append(0); data.append(-3); + data.append(-1); + data.append(-1); + data.append(-1); + data.append(0); data.append(-3); - data.append(2); - data.append(-2); data.append(-1); - data.append(2); data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/input_1.cairo b/tests/nodes/equal_i8/input_1.cairo index c27187429..3e13a87c6 100644 --- a/tests/nodes/equal_i8/input_1.cairo +++ b/tests/nodes/equal_i8/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-1); + data.append(-1); data.append(-3); data.append(0); - data.append(-3); data.append(-2); - data.append(-3); + data.append(1); data.append(2); + data.append(0); data.append(2); - data.append(-2); data.append(2); - data.append(-2); - data.append(-1); - data.append(1); + data.append(0); data.append(2); data.append(-1); - data.append(-1); data.append(1); - data.append(0); + data.append(1); data.append(2); - data.append(-2); + data.append(0); data.append(-2); 
data.append(-3); data.append(-2); - data.append(2); data.append(-1); - data.append(-2); - data.append(0); + data.append(-3); + data.append(-3); + data.append(1); + data.append(1); + data.append(-3); data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/output_0.cairo b/tests/nodes/equal_i8/output_0.cairo index 798787e19..7c4daa680 100644 --- a/tests/nodes/equal_i8/output_0.cairo +++ b/tests/nodes/equal_i8/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), 
data.span()) } diff --git a/tests/nodes/equal_i8_broadcast.cairo b/tests/nodes/equal_i8_broadcast.cairo index 2713598b4..b8f66a412 100644 --- a/tests/nodes/equal_i8_broadcast.cairo +++ b/tests/nodes/equal_i8_broadcast.cairo @@ -4,12 +4,12 @@ mod output_0; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_i8_broadcast/input_0.cairo b/tests/nodes/equal_i8_broadcast/input_0.cairo index 3f1c3803b..295af2729 100644 --- a/tests/nodes/equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/equal_i8_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(-2); data.append(1); - data.append(1); - data.append(-1); + data.append(0); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/input_1.cairo b/tests/nodes/equal_i8_broadcast/input_1.cairo index e09cd290a..aa5546ee5 100644 --- a/tests/nodes/equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/equal_i8_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(-1); data.append(0); - data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/output_0.cairo b/tests/nodes/equal_i8_broadcast/output_0.cairo index 44d7202af..705f7ffc6 100644 --- a/tests/nodes/equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/equal_i8_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(true); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32.cairo b/tests/nodes/equal_u32.cairo index ef7a18f53..c53070394 100644 --- a/tests/nodes/equal_u32.cairo +++ b/tests/nodes/equal_u32.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_u32/input_0.cairo b/tests/nodes/equal_u32/input_0.cairo index 54f581c79..bcdfa640a 100644 --- a/tests/nodes/equal_u32/input_0.cairo +++ b/tests/nodes/equal_u32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(5); - data.append(5); - data.append(1); + data.append(2); + data.append(0); data.append(3); - data.append(5); + data.append(0); + data.append(1); data.append(3); - data.append(2); data.append(1); - data.append(5); - data.append(0); data.append(4); - data.append(5); - data.append(5); data.append(2); + data.append(4); data.append(5); - data.append(3); data.append(4); - data.append(1); - data.append(0); - data.append(2); data.append(5); + 
data.append(5); + data.append(0); + data.append(4); data.append(3); data.append(3); + data.append(0); + data.append(3); + data.append(1); data.append(5); + data.append(4); + data.append(4); + data.append(3); + data.append(4); data.append(5); - data.append(5); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/input_1.cairo b/tests/nodes/equal_u32/input_1.cairo index 4545dcb02..d64072a1a 100644 --- a/tests/nodes/equal_u32/input_1.cairo +++ b/tests/nodes/equal_u32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(4); - data.append(3); - data.append(5); + data.append(0); + data.append(2); data.append(2); - data.append(4); data.append(1); - data.append(5); data.append(2); - data.append(0); + data.append(3); data.append(2); - data.append(1); data.append(4); - data.append(3); data.append(0); - data.append(5); - data.append(1); - data.append(5); - data.append(5); data.append(3); + data.append(2); data.append(0); - data.append(1); + data.append(0); + data.append(4); data.append(3); + data.append(2); + data.append(4); + data.append(2); data.append(0); + data.append(2); + data.append(4); data.append(3); + data.append(0); + data.append(2); data.append(5); - data.append(3); data.append(2); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/output_0.cairo b/tests/nodes/equal_u32/output_0.cairo index 573b6cac7..c07a9a491 100644 --- a/tests/nodes/equal_u32/output_0.cairo +++ b/tests/nodes/equal_u32/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = 
ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast.cairo b/tests/nodes/equal_u32_broadcast.cairo index 44d663e38..94e9022be 100644 --- a/tests/nodes/equal_u32_broadcast.cairo +++ b/tests/nodes/equal_u32_broadcast.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_u32_broadcast/input_0.cairo b/tests/nodes/equal_u32_broadcast/input_0.cairo index bb26dfe29..f0fc27e10 100644 --- 
a/tests/nodes/equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/equal_u32_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(4); data.append(3); - data.append(0); - data.append(3); - data.append(1); + data.append(4); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/input_1.cairo b/tests/nodes/equal_u32_broadcast/input_1.cairo index 39ab80b0c..dcf2e9f1f 100644 --- a/tests/nodes/equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/equal_u32_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(3); data.append(0); + data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/output_0.cairo b/tests/nodes/equal_u32_broadcast/output_0.cairo index f757d7d01..705f7ffc6 100644 --- a/tests/nodes/equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/equal_u32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(true); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_fp16x16.cairo b/tests/nodes/is_inf_fp16x16.cairo deleted file mode 100644 index d09a4a6f8..000000000 --- a/tests/nodes/is_inf_fp16x16.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use 
orion::operators::tensor::FP16x16TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_is_inf_fp16x16() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_inf_fp16x16/input_0.cairo b/tests/nodes/is_inf_fp16x16/input_0.cairo deleted file mode 100644 index 439f44bf1..000000000 --- a/tests/nodes/is_inf_fp16x16/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 78643, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FixedTrait::NEG_INF()); - data.append(FP16x16 { mag: 183500, sign: false }); - data.append(FixedTrait::POS_INF()); - data.append(FixedTrait::NEG_INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_fp16x16/output_0.cairo b/tests/nodes/is_inf_fp16x16/output_0.cairo deleted file mode 100644 index 059edbf71..000000000 --- a/tests/nodes/is_inf_fp16x16/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - 
TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_fp8x23.cairo b/tests/nodes/is_inf_fp8x23.cairo deleted file mode 100644 index a65951dd7..000000000 --- a/tests/nodes/is_inf_fp8x23.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_is_inf_fp8x23() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_inf_fp8x23/input_0.cairo b/tests/nodes/is_inf_fp8x23/input_0.cairo deleted file mode 100644 index 29b4c52e7..000000000 --- a/tests/nodes/is_inf_fp8x23/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 10066329, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FixedTrait::NEG_INF()); - data.append(FP8x23 { mag: 23488102, sign: false }); - data.append(FixedTrait::POS_INF()); - data.append(FixedTrait::NEG_INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_fp8x23/output_0.cairo b/tests/nodes/is_inf_fp8x23/output_0.cairo deleted file mode 100644 index 059edbf71..000000000 --- a/tests/nodes/is_inf_fp8x23/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_i32.cairo b/tests/nodes/is_inf_i32.cairo index 3dd3b234d..dfa790fd0 100644 --- a/tests/nodes/is_inf_i32.cairo +++ b/tests/nodes/is_inf_i32.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::U32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] fn test_is_inf_i32() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); + let y_0 = TensorTrait::is_inf(@input_0, Option::None, Option::None); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/is_inf_i32/input_0.cairo b/tests/nodes/is_inf_i32/input_0.cairo index 7b44e2c7c..0bdb9d040 100644 --- a/tests/nodes/is_inf_i32/input_0.cairo +++ b/tests/nodes/is_inf_i32/input_0.cairo @@ -1,7 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::{NumberTrait}; +use orion::numbers::NumberTrait; fn 
input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-1); data.append(0); - data.append(NumberTrait::INF()); + data.append(-1); data.append(8); - data.append(NumberTrait::INF() * -1); - data.append(NumberTrait::INF()); + data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_i32/output_0.cairo b/tests/nodes/is_inf_i32/output_0.cairo index 059edbf71..05f1fa0a3 100644 --- a/tests/nodes/is_inf_i32/output_0.cairo +++ b/tests/nodes/is_inf_i32/output_0.cairo @@ -1,17 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_i8.cairo b/tests/nodes/is_inf_i8.cairo deleted file mode 100644 index 251e01480..000000000 --- a/tests/nodes/is_inf_i8.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_is_inf_i8() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = 
TensorTrait::is_inf(@input_0, Option::None, Option::None); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_inf_i8/input_0.cairo b/tests/nodes/is_inf_i8/input_0.cairo deleted file mode 100644 index e9af358c2..000000000 --- a/tests/nodes/is_inf_i8/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::{NumberTrait}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(-1); - data.append(0); - data.append(NumberTrait::INF()); - data.append(8); - data.append(NumberTrait::INF() * -1); - data.append(NumberTrait::INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_i8/output_0.cairo b/tests/nodes/is_inf_i8/output_0.cairo deleted file mode 100644 index 059edbf71..000000000 --- a/tests/nodes/is_inf_i8/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_u32.cairo b/tests/nodes/is_inf_u32.cairo deleted file mode 100644 index 21cd1a9fc..000000000 --- a/tests/nodes/is_inf_u32.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; 
-use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_is_inf_u32() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_inf_u32/input_0.cairo b/tests/nodes/is_inf_u32/input_0.cairo deleted file mode 100644 index a7bc9ea61..000000000 --- a/tests/nodes/is_inf_u32/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(NumberTrait::INF()); - data.append(8); - data.append(NumberTrait::INF()); - data.append(NumberTrait::INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_inf_u32/output_0.cairo b/tests/nodes/is_inf_u32/output_0.cairo deleted file mode 100644 index 059edbf71..000000000 --- a/tests/nodes/is_inf_u32/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_nan_fp16x16.cairo b/tests/nodes/is_nan_fp16x16.cairo index 4b5de16f4..70c1085da 100644 --- a/tests/nodes/is_nan_fp16x16.cairo +++ b/tests/nodes/is_nan_fp16x16.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] fn test_is_nan_fp16x16() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = TensorTrait::is_nan(@input_0); + let y_0 = TensorTrait::is_nan(@input_0); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/is_nan_fp16x16/input_0.cairo b/tests/nodes/is_nan_fp16x16/input_0.cairo index 8c86af4fb..4a28acbb2 100644 --- a/tests/nodes/is_nan_fp16x16/input_0.cairo +++ b/tests/nodes/is_nan_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 78643, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FixedTrait::NaN()); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 183500, sign: false }); - data.append(FixedTrait::NaN()); - data.append(FixedTrait::NaN()); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_nan_fp16x16/output_0.cairo b/tests/nodes/is_nan_fp16x16/output_0.cairo index 059edbf71..05f1fa0a3 100644 --- 
a/tests/nodes/is_nan_fp16x16/output_0.cairo +++ b/tests/nodes/is_nan_fp16x16/output_0.cairo @@ -1,17 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_nan_fp8x23.cairo b/tests/nodes/is_nan_fp8x23.cairo deleted file mode 100644 index 7f1d9682b..000000000 --- a/tests/nodes/is_nan_fp8x23.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::BoolTensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_is_nan_fp8x23() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_nan(@input_0); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_neg_inf_fp16x16.cairo b/tests/nodes/is_neg_inf_fp16x16.cairo deleted file mode 100644 index 99417cdae..000000000 --- a/tests/nodes/is_neg_inf_fp16x16.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_is_neg_inf_fp16x16() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_neg_inf_fp16x16/input_0.cairo b/tests/nodes/is_neg_inf_fp16x16/input_0.cairo deleted file mode 100644 index 3da48092e..000000000 --- a/tests/nodes/is_neg_inf_fp16x16/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 1, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FixedTrait::POS_INF()); - data.append(FP16x16 { mag: 2, sign: false }); - data.append(FixedTrait::NEG_INF()); - data.append(FixedTrait::POS_INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_neg_inf_fp16x16/output_0.cairo b/tests/nodes/is_neg_inf_fp16x16/output_0.cairo deleted file mode 100644 index 0e3c52449..000000000 --- a/tests/nodes/is_neg_inf_fp16x16/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - TensorTrait::new(shape.span(), data.span()) -} diff --git 
a/tests/nodes/is_neg_inf_fp8x23.cairo b/tests/nodes/is_neg_inf_fp8x23.cairo deleted file mode 100644 index fd053abab..000000000 --- a/tests/nodes/is_neg_inf_fp8x23.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_is_neg_inf_fp8x23() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_neg_inf_fp8x23/input_0.cairo b/tests/nodes/is_neg_inf_fp8x23/input_0.cairo deleted file mode 100644 index 9b60362be..000000000 --- a/tests/nodes/is_neg_inf_fp8x23/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 1, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FixedTrait::POS_INF()); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FixedTrait::NEG_INF()); - data.append(FixedTrait::POS_INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_neg_inf_fp8x23/output_0.cairo b/tests/nodes/is_neg_inf_fp8x23/output_0.cairo deleted file mode 100644 index 0e3c52449..000000000 --- a/tests/nodes/is_neg_inf_fp8x23/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_neg_inf_i32.cairo b/tests/nodes/is_neg_inf_i32.cairo index 4cdad3051..054c70aed 100644 --- a/tests/nodes/is_neg_inf_i32.cairo +++ b/tests/nodes/is_neg_inf_i32.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::U32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] fn test_is_neg_inf_i32() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); + let y_0 = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/is_neg_inf_i32/input_0.cairo b/tests/nodes/is_neg_inf_i32/input_0.cairo index 7b44e2c7c..0bdb9d040 100644 --- a/tests/nodes/is_neg_inf_i32/input_0.cairo +++ b/tests/nodes/is_neg_inf_i32/input_0.cairo @@ -1,7 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::{NumberTrait}; +use 
orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-1); data.append(0); - data.append(NumberTrait::INF()); + data.append(-1); data.append(8); - data.append(NumberTrait::INF() * -1); - data.append(NumberTrait::INF()); + data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_neg_inf_i32/output_0.cairo b/tests/nodes/is_neg_inf_i32/output_0.cairo index 0e3c52449..c687bbb11 100644 --- a/tests/nodes/is_neg_inf_i32/output_0.cairo +++ b/tests/nodes/is_neg_inf_i32/output_0.cairo @@ -1,17 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_neg_inf_i8.cairo b/tests/nodes/is_neg_inf_i8.cairo deleted file mode 100644 index 3bde58b79..000000000 --- a/tests/nodes/is_neg_inf_i8.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_is_neg_inf_i8() { - let input_0 = 
input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_neg_inf_i8/input_0.cairo b/tests/nodes/is_neg_inf_i8/input_0.cairo deleted file mode 100644 index e9af358c2..000000000 --- a/tests/nodes/is_neg_inf_i8/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::{NumberTrait}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(-1); - data.append(0); - data.append(NumberTrait::INF()); - data.append(8); - data.append(NumberTrait::INF() * -1); - data.append(NumberTrait::INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_neg_inf_i8/output_0.cairo b/tests/nodes/is_neg_inf_i8/output_0.cairo deleted file mode 100644 index 0e3c52449..000000000 --- a/tests/nodes/is_neg_inf_i8/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_pos_inf_fp16x16.cairo b/tests/nodes/is_pos_inf_fp16x16.cairo deleted file mode 100644 index 2d669088c..000000000 --- a/tests/nodes/is_pos_inf_fp16x16.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_is_pos_inf_fp16x16() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_pos_inf_fp16x16/input_0.cairo b/tests/nodes/is_pos_inf_fp16x16/input_0.cairo deleted file mode 100644 index 68441d517..000000000 --- a/tests/nodes/is_pos_inf_fp16x16/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 1, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 4294967295, sign: false }); - data.append(FP16x16 { mag: 2, sign: false }); - data.append(FP16x16 { mag: 4294967295, sign: true }); - data.append(FP16x16 { mag: 4294967295, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_pos_inf_fp16x16/output_0.cairo b/tests/nodes/is_pos_inf_fp16x16/output_0.cairo deleted file mode 100644 index 08be59d64..000000000 --- a/tests/nodes/is_pos_inf_fp16x16/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - 
data.append(true); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_pos_inf_fp8x23.cairo b/tests/nodes/is_pos_inf_fp8x23.cairo deleted file mode 100644 index 85f70322a..000000000 --- a/tests/nodes/is_pos_inf_fp8x23.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_is_pos_inf_fp8x23() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_pos_inf_fp8x23/input_0.cairo b/tests/nodes/is_pos_inf_fp8x23/input_0.cairo deleted file mode 100644 index cc50787fb..000000000 --- a/tests/nodes/is_pos_inf_fp8x23/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 1, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 4294967295, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 4294967295, sign: true }); - data.append(FP8x23 { mag: 4294967295, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_pos_inf_fp8x23/output_0.cairo b/tests/nodes/is_pos_inf_fp8x23/output_0.cairo deleted file mode 100644 index 08be59d64..000000000 --- 
a/tests/nodes/is_pos_inf_fp8x23/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_pos_inf_i32.cairo b/tests/nodes/is_pos_inf_i32.cairo index 35eea6426..7227fafc6 100644 --- a/tests/nodes/is_pos_inf_i32.cairo +++ b/tests/nodes/is_pos_inf_i32.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::U32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] fn test_is_pos_inf_i32() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); + let y_0 = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/is_pos_inf_i32/input_0.cairo b/tests/nodes/is_pos_inf_i32/input_0.cairo index 7b44e2c7c..0bdb9d040 100644 --- a/tests/nodes/is_pos_inf_i32/input_0.cairo +++ b/tests/nodes/is_pos_inf_i32/input_0.cairo @@ -1,7 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::{NumberTrait}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-1); data.append(0); - data.append(NumberTrait::INF()); + data.append(-1); data.append(8); - data.append(NumberTrait::INF() * -1); - data.append(NumberTrait::INF()); + data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_pos_inf_i32/output_0.cairo b/tests/nodes/is_pos_inf_i32/output_0.cairo index 08be59d64..96c5928ea 100644 --- a/tests/nodes/is_pos_inf_i32/output_0.cairo +++ b/tests/nodes/is_pos_inf_i32/output_0.cairo @@ -1,17 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_pos_inf_i8.cairo b/tests/nodes/is_pos_inf_i8.cairo deleted file mode 100644 index 5dc40cce1..000000000 --- a/tests/nodes/is_pos_inf_i8.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::BoolTensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::I8TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_is_pos_inf_i8() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/is_pos_inf_i8/input_0.cairo b/tests/nodes/is_pos_inf_i8/input_0.cairo deleted file mode 100644 index e9af358c2..000000000 --- a/tests/nodes/is_pos_inf_i8/input_0.cairo +++ /dev/null @@ -1,18 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::{NumberTrait}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(-1); - data.append(0); - data.append(NumberTrait::INF()); - data.append(8); - data.append(NumberTrait::INF() * -1); - data.append(NumberTrait::INF()); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/is_pos_inf_i8/output_0.cairo b/tests/nodes/is_pos_inf_i8/output_0.cairo deleted file mode 100644 index 08be59d64..000000000 --- a/tests/nodes/is_pos_inf_i8/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/less_fp16x16.cairo b/tests/nodes/less_fp16x16.cairo index 0f464463a..d2d62e5bc 100644 --- a/tests/nodes/less_fp16x16.cairo +++ b/tests/nodes/less_fp16x16.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; 
-use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp16x16/input_0.cairo b/tests/nodes/less_fp16x16/input_0.cairo index 12c4be2fd..196bfa800 100644 --- a/tests/nodes/less_fp16x16/input_0.cairo +++ b/tests/nodes/less_fp16x16/input_0.cairo @@ -10,31 +10,31 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { 
mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_fp16x16/input_1.cairo b/tests/nodes/less_fp16x16/input_1.cairo index 8c6b3809a..417f0b2c9 100644 --- a/tests/nodes/less_fp16x16/input_1.cairo +++ b/tests/nodes/less_fp16x16/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, 
sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/output_0.cairo b/tests/nodes/less_fp16x16/output_0.cairo index 19b525d8f..a63249875 100644 --- a/tests/nodes/less_fp16x16/output_0.cairo +++ b/tests/nodes/less_fp16x16/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); 
shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast.cairo b/tests/nodes/less_fp16x16_broadcast.cairo index 750e149d7..300ce8633 100644 --- a/tests/nodes/less_fp16x16_broadcast.cairo +++ b/tests/nodes/less_fp16x16_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; 
#[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_fp16x16_broadcast/input_0.cairo index cab41faaa..6f1019bfb 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); 
data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_fp16x16_broadcast/input_1.cairo index 65ffb99b7..e28fda5fc 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_1.cairo @@ -11,7 +11,7 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_fp16x16_broadcast/output_0.cairo index b638d7ea1..e98ba8452 100644 --- a/tests/nodes/less_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - 
data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23.cairo b/tests/nodes/less_fp8x23.cairo index 608b0e1e0..a9e6a56f8 100644 --- a/tests/nodes/less_fp8x23.cairo +++ b/tests/nodes/less_fp8x23.cairo @@ -4,12 +4,12 @@ mod output_0; use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp8x23/input_0.cairo b/tests/nodes/less_fp8x23/input_0.cairo index aee69b9c7..78b7afc4c 100644 --- a/tests/nodes/less_fp8x23/input_0.cairo +++ b/tests/nodes/less_fp8x23/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 
16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/input_1.cairo 
b/tests/nodes/less_fp8x23/input_1.cairo index a013faaea..c406deb82 100644 --- a/tests/nodes/less_fp8x23/input_1.cairo +++ b/tests/nodes/less_fp8x23/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + 
data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/output_0.cairo b/tests/nodes/less_fp8x23/output_0.cairo index 180bf7d2e..8384ae7c3 100644 --- a/tests/nodes/less_fp8x23/output_0.cairo +++ b/tests/nodes/less_fp8x23/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(true); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + 
data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast.cairo b/tests/nodes/less_fp8x23_broadcast.cairo index 19bdf417c..8f30d9941 100644 --- a/tests/nodes/less_fp8x23_broadcast.cairo +++ b/tests/nodes/less_fp8x23_broadcast.cairo @@ -4,12 +4,12 @@ mod output_0; use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_fp8x23_broadcast/input_0.cairo index 425ca9e2e..b3dfbb2e4 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); 
data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_fp8x23_broadcast/input_1.cairo index d7e956748..11c1e2841 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { 
mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_fp8x23_broadcast/output_0.cairo index 05547a4b4..f05fe8301 100644 --- a/tests/nodes/less_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32.cairo b/tests/nodes/less_i32.cairo index 9c5cf2bbf..f2648c188 100644 --- 
a/tests/nodes/less_i32.cairo +++ b/tests/nodes/less_i32.cairo @@ -4,12 +4,12 @@ mod output_0; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i32/input_0.cairo b/tests/nodes/less_i32/input_0.cairo index d89055483..e534eab72 100644 --- a/tests/nodes/less_i32/input_0.cairo +++ b/tests/nodes/less_i32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); + data.append(-3); data.append(0); data.append(-2); + data.append(-1); + data.append(-3); data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); - data.append(-1); - data.append(-1); data.append(0); + data.append(-1); + data.append(-3); data.append(0); data.append(-3); data.append(1); + data.append(0); + data.append(1); data.append(-1); - data.append(-3); - data.append(-3); - data.append(2); data.append(-1); data.append(0); - data.append(-2); data.append(2); + data.append(0); data.append(-1); + data.append(2); + data.append(2); + data.append(-2); + data.append(0); + data.append(0); data.append(1); - data.append(-3); - data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/input_1.cairo b/tests/nodes/less_i32/input_1.cairo index a33bfe41b..a783f55d8 100644 --- a/tests/nodes/less_i32/input_1.cairo +++ b/tests/nodes/less_i32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { 
shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(0); - data.append(1); - data.append(2); data.append(1); - data.append(-1); - data.append(-2); + data.append(-3); data.append(0); data.append(-3); + data.append(-3); + data.append(-2); + data.append(-3); + data.append(-3); + data.append(2); data.append(0); data.append(1); - data.append(2); - data.append(-2); data.append(-2); + data.append(1); + data.append(0); data.append(2); + data.append(0); data.append(2); data.append(2); - data.append(0); - data.append(1); data.append(-3); data.append(-3); - data.append(-2); - data.append(-2); - data.append(-2); data.append(-3); + data.append(-1); + data.append(2); data.append(-3); + data.append(1); data.append(2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/output_0.cairo b/tests/nodes/less_i32/output_0.cairo index 935f05c3b..d33bd84f2 100644 --- a/tests/nodes/less_i32/output_0.cairo +++ b/tests/nodes/less_i32/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); 
- data.append(true); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast.cairo b/tests/nodes/less_i32_broadcast.cairo index f82c428a8..652384e37 100644 --- a/tests/nodes/less_i32_broadcast.cairo +++ b/tests/nodes/less_i32_broadcast.cairo @@ -4,12 +4,12 @@ mod output_0; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i32_broadcast/input_0.cairo b/tests/nodes/less_i32_broadcast/input_0.cairo index bc2944af3..65654c2ba 100644 --- a/tests/nodes/less_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_i32_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(1); data.append(-2); - data.append(2); - data.append(1); data.append(0); - data.append(-3); - data.append(-1); - data.append(-1); - data.append(-3); + data.append(1); data.append(0); data.append(-3); - data.append(-3); - data.append(-1); + 
data.append(1); data.append(-1); data.append(1); - data.append(2); + data.append(1); data.append(0); - data.append(-2); - data.append(-2); + data.append(2); data.append(1); - data.append(-3); + data.append(-2); data.append(1); data.append(-2); data.append(-2); data.append(-1); + data.append(-1); data.append(-2); + data.append(2); + data.append(-1); + data.append(1); + data.append(-3); + data.append(0); + data.append(0); + data.append(2); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/input_1.cairo b/tests/nodes/less_i32_broadcast/input_1.cairo index 4f6d1b719..68175cdbb 100644 --- a/tests/nodes/less_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_i32_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(2); - data.append(-1); + data.append(1); + data.append(-3); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/output_0.cairo b/tests/nodes/less_i32_broadcast/output_0.cairo index 3ffcf6eac..f68184301 100644 --- a/tests/nodes/less_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_i32_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - 
data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8.cairo b/tests/nodes/less_i8.cairo index 00d6dddd4..5e7e38c72 100644 --- a/tests/nodes/less_i8.cairo +++ b/tests/nodes/less_i8.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i8/input_0.cairo b/tests/nodes/less_i8/input_0.cairo index b2c30bed7..2ab7aa6a9 100644 --- a/tests/nodes/less_i8/input_0.cairo +++ b/tests/nodes/less_i8/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(2); + data.append(2); + data.append(-2); data.append(1); data.append(-3); + data.append(-1); + 
data.append(-3); + data.append(-1); data.append(1); data.append(1); - data.append(-3); data.append(0); data.append(-3); - data.append(0); - data.append(2); data.append(-2); data.append(2); - data.append(-2); + data.append(1); + data.append(1); + data.append(0); data.append(0); - data.append(-2); - data.append(-1); - data.append(-2); - data.append(-3); - data.append(-1); - data.append(-1); - data.append(-3); - data.append(2); - data.append(2); data.append(1); - data.append(-3); data.append(1); data.append(2); + data.append(1); data.append(-1); + data.append(-3); + data.append(-1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/input_1.cairo b/tests/nodes/less_i8/input_1.cairo index cde86d6da..4f243f384 100644 --- a/tests/nodes/less_i8/input_1.cairo +++ b/tests/nodes/less_i8/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(-3); - data.append(-3); - data.append(-2); - data.append(1); data.append(-1); - data.append(-3); data.append(0); data.append(0); - data.append(-2); data.append(-1); - data.append(-1); - data.append(-2); + data.append(1); + data.append(0); + data.append(1); data.append(0); - data.append(-3); data.append(0); data.append(2); + data.append(-1); data.append(0); - data.append(1); data.append(2); - data.append(-3); + data.append(2); + data.append(1); data.append(0); + data.append(-2); data.append(-1); - data.append(0); + data.append(1); + data.append(2); data.append(-1); + data.append(-3); data.append(-2); data.append(-1); + data.append(-1); + data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/output_0.cairo b/tests/nodes/less_i8/output_0.cairo index 0f911f707..deaa47e94 100644 --- a/tests/nodes/less_i8/output_0.cairo +++ b/tests/nodes/less_i8/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast.cairo b/tests/nodes/less_i8_broadcast.cairo index d100935c3..11522179f 100644 --- a/tests/nodes/less_i8_broadcast.cairo +++ b/tests/nodes/less_i8_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use 
orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i8_broadcast/input_0.cairo b/tests/nodes/less_i8_broadcast/input_0.cairo index 19ab58b2d..1a26a2ad9 100644 --- a/tests/nodes/less_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_i8_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(-3); - data.append(0); - data.append(2); - data.append(0); data.append(-3); + data.append(-2); data.append(1); + data.append(-3); data.append(2); - data.append(0); + data.append(-2); + data.append(-1); + data.append(-2); data.append(2); data.append(-3); data.append(-1); - data.append(1); data.append(-2); data.append(-3); - data.append(-2); + data.append(0); + data.append(-1); data.append(1); + data.append(2); data.append(-1); data.append(2); - data.append(0); data.append(-1); - data.append(-2); - data.append(0); + data.append(-3); + data.append(-1); + data.append(-1); data.append(1); - data.append(-2); - data.append(-2); + data.append(0); data.append(1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/input_1.cairo b/tests/nodes/less_i8_broadcast/input_1.cairo index 88ed8e242..5dd4851ef 100644 --- a/tests/nodes/less_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_i8_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(1); - data.append(-3); - data.append(1); + data.append(0); + data.append(-2); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/output_0.cairo 
b/tests/nodes/less_i8_broadcast/output_0.cairo index ef4e9369d..5c189b150 100644 --- a/tests/nodes/less_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_i8_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32.cairo b/tests/nodes/less_u32.cairo index b6546f1a2..20edb7d18 100644 --- a/tests/nodes/less_u32.cairo +++ b/tests/nodes/less_u32.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; -use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_u32/input_0.cairo b/tests/nodes/less_u32/input_0.cairo index 89a0d8072..351e02b1c 100644 --- a/tests/nodes/less_u32/input_0.cairo +++ b/tests/nodes/less_u32/input_0.cairo @@ -12,30 +12,30 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(3); data.append(3); - data.append(1); - data.append(3); - data.append(1); + data.append(5); data.append(0); + data.append(3); + data.append(4); + data.append(3); + data.append(3); data.append(0); - data.append(1); data.append(0); data.append(3); - data.append(2); - data.append(4); + data.append(0); + data.append(1); data.append(4); + data.append(1); data.append(5); data.append(3); - data.append(1); - data.append(1); + data.append(4); data.append(5); data.append(5); data.append(0); + data.append(0); + data.append(4); data.append(3); data.append(2); - data.append(3); - data.append(1); - data.append(3); data.append(2); - data.append(1); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/input_1.cairo b/tests/nodes/less_u32/input_1.cairo index 528c5927d..17e0c6f7b 100644 --- a/tests/nodes/less_u32/input_1.cairo +++ b/tests/nodes/less_u32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(4); data.append(0); data.append(3); data.append(4); - data.append(2); - data.append(2); + data.append(3); data.append(5); data.append(4); data.append(5); - data.append(4); + data.append(3); + data.append(2); + data.append(0); + data.append(0); data.append(0); - 
data.append(4); - data.append(5); - data.append(5); data.append(4); data.append(0); + data.append(1); data.append(4); data.append(2); - data.append(4); - data.append(5); - data.append(5); + data.append(0); + data.append(1); + data.append(0); + data.append(2); data.append(3); + data.append(0); + data.append(0); + data.append(0); data.append(4); - data.append(5); - data.append(4); - data.append(5); - data.append(4); - data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/output_0.cairo b/tests/nodes/less_u32/output_0.cairo index 87600c456..cde1b985b 100644 --- a/tests/nodes/less_u32/output_0.cairo +++ b/tests/nodes/less_u32/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + 
data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast.cairo b/tests/nodes/less_u32_broadcast.cairo index d394bea89..e4ea35ea3 100644 --- a/tests/nodes/less_u32_broadcast.cairo +++ b/tests/nodes/less_u32_broadcast.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; -use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_u32_broadcast/input_0.cairo b/tests/nodes/less_u32_broadcast/input_0.cairo index a740ac39b..d16f00b49 100644 --- a/tests/nodes/less_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_u32_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(4); + data.append(5); + data.append(4); data.append(1); + data.append(3); + data.append(0); + data.append(5); + data.append(5); + data.append(3); data.append(4); data.append(2); - data.append(5); + data.append(2); data.append(5); data.append(0); - data.append(3); - data.append(3); data.append(0); - data.append(1); - data.append(2); - data.append(4); data.append(0); - data.append(2); + data.append(0); + data.append(0); + data.append(1); data.append(1); data.append(1); data.append(2); - data.append(5); - data.append(2); - data.append(3); - data.append(4); data.append(1); + data.append(0); data.append(3); + data.append(4); data.append(1); - data.append(2); - data.append(5); - 
data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/input_1.cairo b/tests/nodes/less_u32_broadcast/input_1.cairo index f05f4343a..0f4dcd907 100644 --- a/tests/nodes/less_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_u32_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(0); - data.append(5); + data.append(1); data.append(5); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/output_0.cairo b/tests/nodes/less_u32_broadcast/output_0.cairo index a9ade1a52..f22ac65b4 100644 --- a/tests/nodes/less_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_u32_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + 
data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/not_bool.cairo b/tests/nodes/not_bool.cairo index cc73e1cd4..d5b30a089 100644 --- a/tests/nodes/not_bool.cairo +++ b/tests/nodes/not_bool.cairo @@ -3,18 +3,18 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::TensorTrait; use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::assert_eq; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_not_bool() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.not(); + let y_0 = input_0.not(); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/not_bool/input_0.cairo b/tests/nodes/not_bool/input_0.cairo index eef582ae5..17f34d5de 100644 --- a/tests/nodes/not_bool/input_0.cairo +++ b/tests/nodes/not_bool/input_0.cairo @@ -3,7 +3,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensor; fn input_0() -> Tensor { - let mut shape = ArrayTrait::new(); + let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(1); diff --git a/tests/nodes/not_bool/output_0.cairo b/tests/nodes/not_bool/output_0.cairo index 43bb7750d..8b59aea96 100644 --- a/tests/nodes/not_bool/output_0.cairo +++ b/tests/nodes/not_bool/output_0.cairo @@ -3,7 +3,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensor; fn output_0() -> Tensor { - let mut shape = ArrayTrait::new(); + let mut shape = ArrayTrait::::new(); shape.append(1); shape.append(1); From 
9d964fbddea9351ea1eb0693994785a3e372a376 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Thu, 21 Mar 2024 19:11:25 +0100 Subject: [PATCH 25/68] refactor reshape --- src/operators/nn/functional/col2im.cairo | 62 +- .../nn/functional/conv_transpose.cairo | 234 +- .../nn/functional/depth_to_space.cairo | 25 +- .../nn/functional/space_to_depth.cairo | 27 +- src/operators/tensor/core.cairo | 68 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- .../manipulation/split_to_sequence.cairo | 170 +- src/operators/tensor/math/flatten.cairo | 8 +- .../tensor/math/layer_normalization.cairo | 18 +- tests/lib.cairo | 12 +- tests/nodes.cairo | 2086 ++++++++--------- 21 files changed, 1440 insertions(+), 1292 deletions(-) diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index 4f9cfc1a8..a3ad47a49 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -56,12 +56,16 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let bl = prod(block_shape, 0); let C = *(*data).shape.at(1) / bl; - let mut new_shape = array![*(*data).shape.at(0), C, bl]; + let mut new_shape: Array = array![ + (*(*data).shape.at(0)).try_into().unwrap(), C.try_into().unwrap(), bl.try_into().unwrap() + ]; let mut i = 2; - while i != (*data).shape.len() { - new_shape.append(*(*data).shape.at(i)); - i += 1; - }; + while i != (*data) + .shape + .len() { + new_shape.append((*(*data).shape.at(i)).try_into().unwrap()); + i += 1; + }; let data = 
data.reshape(new_shape.span()); @@ -69,30 +73,36 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let data_stride = stride(data.shape); let mut n = 0; - while n != *data.shape.at(0) { - let mut c = 0; - while c != *data.shape.at(1) { - let data_n_c = TensorTrait::new( - SpanTrait::slice(data.shape, 2, data.shape.len() - 2), - SpanTrait::slice( - data.data, n * *data_stride.at(0) + c * *data_stride.at(1), *data_stride.at(1) - ) - ); - let mut out = col2im_naive_implementation( - @data_n_c, image_shape, block_shape, dilations, pads, strides - ); - let mut i = 0; - while i != out.len() { - res.append(out.at(i)); - i += 1; - }; + while n != *data + .shape + .at(0) { + let mut c = 0; + while c != *data + .shape + .at(1) { + let data_n_c = TensorTrait::new( + SpanTrait::slice(data.shape, 2, data.shape.len() - 2), + SpanTrait::slice( + data.data, + n * *data_stride.at(0) + c * *data_stride.at(1), + *data_stride.at(1) + ) + ); + let mut out = col2im_naive_implementation( + @data_n_c, image_shape, block_shape, dilations, pads, strides + ); + let mut i = 0; + while i != out.len() { + res.append(out.at(i)); + i += 1; + }; + + c += 1; + }; - c += 1; + n += 1; }; - n += 1; - }; - let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; let mut i = 0; while i != image_shape.len() { diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index f8f810558..c24c2163d 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -61,11 +61,13 @@ fn conv_transpose< Option::None => { let mut output_padding: Array = array![]; let mut i = 2; - while i != (*X).shape.len() { - output_padding.append(0); - output_padding.append(0); - i += 1; - }; + while i != (*X) + .shape + .len() { + output_padding.append(0); + output_padding.append(0); + i += 1; + }; output_padding.span() }, @@ -151,10 +153,11 @@ fn conv_transpose< Option::None => { let mut output_shape: Array = 
array![]; let mut i = 0; - while i != strides.len() { - output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); - i += 1; - }; + while i != strides + .len() { + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; output_shape.span() }, @@ -162,16 +165,17 @@ fn conv_transpose< let mut total_padding: Array = array![]; let mut i = 0; - while i != output_shape.len() { - total_padding - .append( - (*(*X).shape.at(i + 2) - 1) * *strides.at(i) - + *output_padding.at(i) - + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - - *output_shape.at(i) - ); - i += 1; - }; + while i != output_shape + .len() { + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; let total_padding = total_padding.span(); @@ -184,10 +188,11 @@ fn conv_transpose< }; let mut i = 0; - while i != output_shape.len() { - pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); - i += 1; - }; + while i != output_shape + .len() { + pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); + i += 1; + }; (pads.span(), pads.len() / 2, output_shape) }, @@ -197,10 +202,11 @@ fn conv_transpose< Option::None => { let mut output_shape: Array = array![]; let mut i = 0; - while i != strides.len() { - output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); - i += 1; - }; + while i != strides + .len() { + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; output_shape.span() }, @@ -208,26 +214,28 @@ fn conv_transpose< let mut total_padding: Array = array![]; let mut i = 0; - while i != output_shape.len() { - total_padding - .append( - (*(*X).shape.at(i + 2) - 1) * *strides.at(i) - + *output_padding.at(i) - + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - - *output_shape.at(i) - ); - i += 1; - }; + while i != output_shape + .len() { + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * 
*strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; let total_padding = total_padding.span(); let mut pads: Array = array![]; let mut i = 0; - while i != output_shape.len() { - pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); - i += 1; - }; + while i != output_shape + .len() { + pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); + i += 1; + }; let mut i = 0; while i != output_shape.len() { @@ -302,50 +310,62 @@ fn conv_transpose< if group == 1 { let mut image_id = 0; - while image_id != *(*X).shape.at(0) { - let w_t = TensorTrait::new(array![k, m].span(), (*W).data) - .transpose(array![1, 0].span()); - - let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); - let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); - - let gemmc = gemm - .reshape(array![num_output_channels, m / num_output_channels, n].span()); - let mut c = 0; - while c != num_output_channels { - let gemmc_c = TensorTrait::new( - array![m / num_output_channels, n].span(), - SpanTrait::slice( - gemmc.data, (m / num_output_channels) * n * c, (m / num_output_channels) * n - ) - ); - - let mut res = col2im_naive_implementation( - @gemmc_c, output_shape, kernel_shape, dilations, pads, strides - ); - - match B { - Option::Some(B) => { - let mut i = 0; - while i != res.len() { - res.set(i, res.at(i) + *(*B).data.at(c)); - i += 1; - }; - }, - Option::None => {}, - } + while image_id != *(*X) + .shape + .at(0) { + let w_t = TensorTrait::new(array![k, m].span(), (*W).data) + .transpose(array![1, 0].span()); + + let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); + let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); + + let gemmc = gemm + .reshape( + array![ + num_output_channels.try_into().unwrap(), + (m / num_output_channels).try_into().unwrap(), + n.try_into().unwrap() + ] + .span() + ); + let mut c = 0; + while c != num_output_channels { + 
let gemmc_c = TensorTrait::new( + array![m / num_output_channels, n].span(), + SpanTrait::slice( + gemmc.data, + (m / num_output_channels) * n * c, + (m / num_output_channels) * n + ) + ); + + let mut res = col2im_naive_implementation( + @gemmc_c, output_shape, kernel_shape, dilations, pads, strides + ); + + match B { + Option::Some(B) => { + let mut i = 0; + while i != res + .len() { + res.set(i, res.at(i) + *(*B).data.at(c)); + i += 1; + }; + }, + Option::None => {}, + } - c += 1; + c += 1; - let mut i = 0; - while i != res.len() { - final.append(res.at(i)); - i += 1; + let mut i = 0; + while i != res.len() { + final.append(res.at(i)); + i += 1; + }; }; - }; - image_id += 1; - }; + image_id += 1; + }; } else { let mut output_array: Array> = array![]; @@ -363,19 +383,21 @@ fn conv_transpose< let mut group_W: Array = array![]; let mut image_id = 0; - while image_id != *(*X).shape.at(0) { - let start = image_id * n * C + (group_id * C / group) * n; - let end = image_id * n * C + ((group_id + 1) * C / group) * n; + while image_id != *(*X) + .shape + .at(0) { + let start = image_id * n * C + (group_id * C / group) * n; + let end = image_id * n * C + ((group_id + 1) * C / group) * n; + + let mut i = start; + while i != end { + group_X.append(*(*X).data.at(i)); + i += 1; + }; - let mut i = start; - while i != end { - group_X.append(*(*X).data.at(i)); - i += 1; + image_id += 1; }; - image_id += 1; - }; - let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size; let mut i = start; @@ -433,22 +455,26 @@ fn conv_transpose< // Sorting result per item of the batch // output size : N (batch size) x num_output_channels x output_shape let mut image_id = 0; - while image_id != *(*X).shape.at(0) { - let mut group_id = 0; - while group_id != group { - let group_output = *output_array.at(group_id); - let mut i = image_id * output_size * (num_output_channels / group); - - while i != (image_id + 1) * 
output_size * (num_output_channels / group) { - final.append(*group_output.at(i)); - i += 1; + while image_id != *(*X) + .shape + .at(0) { + let mut group_id = 0; + while group_id != group { + let group_output = *output_array.at(group_id); + let mut i = image_id * output_size * (num_output_channels / group); + + while i != (image_id + 1) + * output_size + * (num_output_channels / group) { + final.append(*group_output.at(i)); + i += 1; + }; + + group_id += 1; }; - group_id += 1; + image_id += 1; }; - - image_id += 1; - }; } let mut shape = array![*(*X).shape.at(0), num_output_channels]; diff --git a/src/operators/nn/functional/depth_to_space.cairo b/src/operators/nn/functional/depth_to_space.cairo index 161ea46ad..1201b1222 100644 --- a/src/operators/nn/functional/depth_to_space.cairo +++ b/src/operators/nn/functional/depth_to_space.cairo @@ -20,21 +20,32 @@ fn depth_to_space< ) -> Tensor { assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); - let b = (tensor.shape).at(0); - let C = (tensor.shape).at(1); - let H = (tensor.shape).at(2); - let W = (tensor.shape).at(3); - let finalshape = array![*b, *C / (blocksize * blocksize), *H * blocksize, *W * blocksize]; + let blocksize_i32: i32 = blocksize.try_into().unwrap(); + + let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap(); + let C: u32 = (*(tensor.shape).at(1)).try_into().unwrap(); + let H: i32 = (*(tensor.shape).at(2)).try_into().unwrap(); + let W: i32 = (*(tensor.shape).at(3)).try_into().unwrap(); + let finalshape: Array = array![ + b, + (C / (blocksize * blocksize)).try_into().unwrap(), + (H * blocksize_i32), + (W * blocksize_i32) + ]; if mode == 'DCR' { - let tmpshape = array![*b, blocksize, blocksize, *C / (blocksize * blocksize), *H, *W]; + let tmpshape: Array = array![ + b, blocksize_i32, blocksize_i32, (C / (blocksize * blocksize)).try_into().unwrap(), H, W + ]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span()); 
transposed.reshape(target_shape: finalshape.span()) } else { // assert mode == "CRD" - let tmpshape = array![*b, *C / (blocksize * blocksize), blocksize, blocksize, *H, *W]; + let tmpshape: Array = array![ + b, (C / (blocksize * blocksize)).try_into().unwrap(), blocksize_i32, blocksize_i32, H, W + ]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span()); diff --git a/src/operators/nn/functional/space_to_depth.cairo b/src/operators/nn/functional/space_to_depth.cairo index d8e8089cb..c95b500a6 100644 --- a/src/operators/nn/functional/space_to_depth.cairo +++ b/src/operators/nn/functional/space_to_depth.cairo @@ -1,3 +1,4 @@ +use core::option::OptionTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -20,14 +21,28 @@ fn space_to_depth< ) -> Tensor { assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); - let b = (tensor.shape).at(0); - let C = (tensor.shape).at(1); - let H = (tensor.shape).at(2); - let W = (tensor.shape).at(3); - let tmpshape = array![*b, *C, *H / blocksize, blocksize, *W / blocksize, blocksize]; + let blocksize_i32: i32 = blocksize.try_into().unwrap(); + + let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap(); + let C: i32 = (*(tensor.shape).at(1)).try_into().unwrap(); + let H: u32 = (*(tensor.shape).at(2)); + let W: u32 = (*(tensor.shape).at(3)); + let tmpshape = array![ + b, + C, + (H / blocksize).try_into().unwrap(), + blocksize_i32, + (W / blocksize).try_into().unwrap(), + blocksize_i32 + ]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span()); - let finalshape = array![*b, *C * blocksize * blocksize, *H / blocksize, *W / blocksize]; + let finalshape = array![ + b, + C * blocksize_i32 * blocksize_i32, + (H / blocksize).try_into().unwrap(), + (W / 
blocksize).try_into().unwrap() + ]; transposed.reshape(target_shape: finalshape.span()) } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 1a08641f3..af20cb446 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -559,7 +559,7 @@ trait TensorTrait { /// # tensor.reshape /// /// ```rust - /// fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + /// fn reshape(self: @Tensor, target_shape: Span) -> Tensor; /// ``` /// /// Returns a new tensor with the specified target shape and the same data as the input tensor. @@ -567,7 +567,7 @@ trait TensorTrait { /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `target_shape`(Span) - A span containing the target shape of the tensor. + /// * `target_shape`(Span) - A span containing the target shape of the tensor. /// /// ## Panics /// @@ -595,7 +595,7 @@ trait TensorTrait { /// >>> [[0,1,2,3], [4,5,6,7]] /// ``` /// - fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + fn reshape(self: @Tensor, target_shape: Span) -> Tensor; /// # tensor.transpose /// /// ```rust @@ -5993,8 +5993,66 @@ fn stride(mut shape: Span) -> Span { /// Cf: TensorTrait::reshape docstring -fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - new_tensor(target_shape, *self.data) +fn reshape>>(self: @Tensor, target_shape: Span) -> Tensor { + // Calculate the total number of elements in the original tensor + let mut total_elements = 1; + let mut shape = *self.shape; + loop { + match shape.pop_front() { + Option::Some(val) => total_elements *= *val, + Option::None => { break; } + }; + }; + + // Calculate 'elements_so_far' and find 'inferred_index' + let mut elements_so_far = 1; + let mut inferred_index = Option::None; + let mut target_shape_clone = target_shape.clone(); + let mut i: usize = 0; + loop { + match target_shape_clone.pop_front() { + Option::Some(dim) => { + if *dim == -1 { + if inferred_index.is_none() { + inferred_index = Option::Some(i); + } 
else { + panic!("Only one dimension can be inferred"); + } + } else if *dim == 0 { + if i >= (*self.shape).len() { + panic!("Dimension out of bounds for using original dimension value"); + } + elements_so_far *= *(*self).shape.at(i); + } else { + if *dim < -1 { + panic!("Invalid dimension size"); + } + elements_so_far *= (*dim).try_into().unwrap(); + }; + }, + Option::None => { break; } + }; + }; + + let mut target_shape_clone = target_shape.clone(); + let mut inferred_shape = ArrayTrait::::new(); + let mut i: usize = 0; + loop { + match target_shape_clone.pop_front() { + Option::Some(dim) => { + if *dim == -1 { + inferred_shape.append(total_elements / elements_so_far) // Inferred dimension + } else if *dim == 0 { + inferred_shape.append(*(*self).shape.at(i)) // Dimension unchanged from original + } else { + inferred_shape.append((*dim).try_into().unwrap()) + }; + }, + Option::None => { break; } + } + }; + + new_tensor(inferred_shape.span(), *self.data) } /// Cf: TensorTrait::at docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index bc7b53569..ad091178f 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -60,7 +60,7 @@ impl BoolTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 295138988..6f369ea32 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -69,7 +69,7 @@ impl Complex64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: 
@Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 50ba750b0..321bca7c5 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -71,7 +71,7 @@ impl FP16x16Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 434ac8fc6..280967a07 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -75,7 +75,7 @@ impl FP16x16WTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 05abc897e..007909cd8 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -68,7 +68,7 @@ impl FP32x32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index e9b730f44..fdeab79d7 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -68,7 +68,7 @@ 
impl FP64x64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index f6d3d40b0..aa1399dbb 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -68,7 +68,7 @@ impl FP8x23Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index f1982bf66..6fd728c65 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -71,7 +71,7 @@ impl FP8x23WTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 00c1f90c5..e35c99270 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -68,7 +68,7 @@ impl I32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index fc8eb6c6f..d428ef200 100644 --- 
a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -66,7 +66,7 @@ impl I8Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 12b03fb77..fff763e3b 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -65,7 +65,7 @@ impl U32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo index 46dbe1af7..90e96e542 100644 --- a/src/operators/tensor/manipulation/split_to_sequence.cairo +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -1,3 +1,4 @@ +use core::option::OptionTrait; use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; @@ -45,22 +46,24 @@ fn split_to_sequence, +Drop, +TensorTrait,>( if (keepdims == 0 && !has_split) { let mut splited_t_temp: Array> = array![]; let mut i = 0; - while i != splited_t.len() { - let mut shape: Array = array![]; - let mut j = 0; - let shape_in_splited: Span = *splited_t.at(i).shape; - while j != shape_in_splited.len() { - if (j != axis) { - shape.append(*shape_in_splited.at(j)) - } - - j += 1; + while i != splited_t + .len() { + let mut shape: Array = array![]; + let mut j = 0; + let shape_in_splited: Span = *splited_t.at(i).shape; + while j != shape_in_splited + .len() { + if (j != axis) { + 
shape.append((*shape_in_splited.at(j)).try_into().unwrap()) + } + + j += 1; + }; + + splited_t_temp.append(splited_t[i].reshape(shape.span())); + i += 1; }; - splited_t_temp.append(splited_t[i].reshape(shape.span())); - i += 1; - }; - return splited_t_temp; } splited_t @@ -105,42 +108,45 @@ fn split_num_outputs, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - while i != (*t).shape.len() { - let s: usize = *(*t).shape.at(i); - sli.set(i, 0, 0); - sli.set(i, 1, s); - i += 1; - }; + while i != (*t) + .shape + .len() { + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; let mut i: usize = 0; - while i != split.len() { - let spl = *split.at(i); - sli.set(axis, 0, pos); - pos += spl; - sli.set(axis, 1, pos); - - let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => res, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => res, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, + while i != split + .len() { + let spl = *split.at(i); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => res, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => res, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; }; - let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); - let ends: Span = array![sli.get(0, 1).unwrap(), 
end_ele_1].span(); - let axes: Option> = Option::None(()); - let steps: Option> = Option::None(()); - let sub_t: Tensor = t.slice(starts, ends, axes, steps); - splited_t.append(sub_t); - i += 1; - }; splited_t } @@ -154,42 +160,46 @@ fn split_has_split, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - while i != (*t).shape.len() { - let s: usize = *(*t).shape.at(i); - sli.set(i, 0, 0); - sli.set(i, 1, s); - i += 1; - }; + while i != (*t) + .shape + .len() { + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; let mut i: usize = 0; - while i != split.data.len() { - let spl: usize = split.at(indices: array![i].span()); - sli.set(axis, 0, pos); - pos += spl; - sli.set(axis, 1, pos); - - let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, + while i != split + .data + .len() { + let spl: usize = split.at(indices: array![i].span()); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => { res }, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => { res }, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; }; - let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); - let ends: Span = 
array![sli.get(0, 1).unwrap(), end_ele_1].span(); - let axes: Option> = Option::None(()); - let steps: Option> = Option::None(()); - let sub_t: Tensor = t.slice(starts, ends, axes, steps); - splited_t.append(sub_t); - i += 1; - }; splited_t } diff --git a/src/operators/tensor/math/flatten.cairo b/src/operators/tensor/math/flatten.cairo index a23671b77..82886a3ff 100644 --- a/src/operators/tensor/math/flatten.cairo +++ b/src/operators/tensor/math/flatten.cairo @@ -23,5 +23,11 @@ fn flatten>(self: @Tensor, axis: usize) let new_shape_second_axis = (*self.data).len() / new_shape_first_axis; - self.reshape(array![new_shape_first_axis, new_shape_second_axis].span()) + self + .reshape( + array![ + new_shape_first_axis.try_into().unwrap(), new_shape_second_axis.try_into().unwrap() + ] + .span() + ) } diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index f185b6bb5..070e9da08 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -1,3 +1,5 @@ +use core::option::OptionTrait; +use core::array::SpanTrait; use orion::numbers::{NumberTrait, I32IntoU32}; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ @@ -74,8 +76,8 @@ fn layer_normalization< }; let mut shape_matrix = array![]; - shape_matrix.append(row_number); - shape_matrix.append(col_number); + shape_matrix.append(row_number.try_into().unwrap()); + shape_matrix.append(col_number.try_into().unwrap()); // Shape [1, 1] to mutiply one element tensors with 2D matrices let mut shape_one = array![]; @@ -128,7 +130,17 @@ fn layer_normalization< *scale }; - let Y = y_mat.reshape((*self).shape) * scale; + let mut target_shape: Array = array![]; + let mut i = 0; + while i < (*self) + .shape + .len() { + target_shape.append((*(*self).shape.at(i)).try_into().unwrap()); + + i += 1; + }; + + let Y = y_mat.reshape(target_shape.span()) * 
scale; let Y = match B { Option::Some(B) => { diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..eb58139db 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -mod numbers; -mod performance; -mod tensor_core; -mod nodes; -mod ml; -mod operators; +// mod numbers; +// mod performance; +// mod tensor_core; +// mod nodes; +// mod ml; +// mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 2907ed7aa..9d921be62 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1043 +1,1043 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod 
argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod 
concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod 
cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod 
or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod 
gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod 
min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod 
binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod 
triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod 
reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod reduce_min_i32_2D_keepdims; -mod 
reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod 
sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_inf_i32; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod 
gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod 
compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -mod reduce_log_sum_exp_fp32x32_export_keepdims; -mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; -mod col2im_strides; -mod random_uniform_like_fp16x16; -mod 
random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod 
depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; -mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; -mod label_encoder_fp16x16_3d_default; -mod label_encoder_fp8x23_default; -mod label_encoder_i8_default; -mod label_encoder_i32_default; -mod label_encoder_u32_default; -mod reduce_sum_single_axis_fp16x16_1D; -mod reduce_sum_single_axis_fp16x16_2D_default; -mod reduce_sum_single_axis_fp16x16_2D_keepdims; -mod reduce_sum_single_axis_fp16x16_2D_axis_1; -mod reduce_sum_single_axis_fp8x23_1D; -mod reduce_sum_single_axis_fp8x23_2D_default; -mod reduce_sum_single_axis_fp8x23_2D_keepdims; -mod reduce_sum_single_axis_fp8x23_2D_axis_1; -mod reduce_sum_single_axis_i32_1D; -mod reduce_sum_single_axis_i32_2D_default; -mod reduce_sum_single_axis_i32_2D_keepdims; -mod reduce_sum_single_axis_i32_2D_axis_1; -mod reduce_sum_single_axis_i8_1D; -mod reduce_sum_single_axis_i8_2D_default; -mod reduce_sum_single_axis_i8_2D_keepdims; -mod reduce_sum_single_axis_i8_2D_axis_1; -mod reduce_sum_single_axis_u32_1D; -mod reduce_sum_single_axis_u32_2D_default; -mod reduce_sum_single_axis_u32_2D_keepdims; -mod 
reduce_sum_single_axis_u32_2D_axis_1; -mod reduce_sum_keep_dims; -mod reduce_sum_no_keep_dims; -mod reduce_sum_default_axes_keepdims; -mod reduce_sum_empty_axes_input_noop; -mod and_bool_broadcast; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// mod add_u32; +// mod add_u32_broadcast; +// mod argmax_fp16x16_1D_default; +// mod argmax_fp16x16_1D_keepdims_false; +// mod argmax_fp16x16_1D_last_index; +// mod argmax_fp16x16_2D_default; +// mod argmax_fp16x16_2D_keepdims_false; +// mod argmax_fp16x16_2D_last_index; +// mod argmax_fp16x16_3D_default; +// mod argmax_fp16x16_3D_keepdims_false; +// mod argmax_fp16x16_3D_last_index; +// mod argmax_fp8x23_1D_default; +// mod argmax_fp8x23_1D_keepdims_false; +// mod argmax_fp8x23_1D_last_index; +// mod argmax_fp8x23_2D_default; +// mod argmax_fp8x23_2D_keepdims_false; +// mod argmax_fp8x23_2D_last_index; +// mod argmax_fp8x23_3D_default; +// mod argmax_fp8x23_3D_keepdims_false; +// mod argmax_fp8x23_3D_last_index; +// mod argmax_i32_1D_default; +// mod argmax_i32_1D_keepdims_false; +// mod argmax_i32_1D_last_index; +// mod argmax_i32_2D_default; +// mod argmax_i32_2D_keepdims_false; +// mod argmax_i32_2D_last_index; +// mod argmax_i32_3D_default; +// mod argmax_i32_3D_keepdims_false; +// mod argmax_i32_3D_last_index; +// mod argmax_i8_1D_default; +// mod argmax_i8_1D_keepdims_false; +// mod argmax_i8_1D_last_index; +// mod argmax_i8_2D_default; +// mod argmax_i8_2D_keepdims_false; +// mod argmax_i8_2D_last_index; +// mod argmax_i8_3D_default; +// mod argmax_i8_3D_keepdims_false; +// mod argmax_i8_3D_last_index; +// mod argmax_u32_1D_default; +// mod argmax_u32_1D_keepdims_false; +// mod argmax_u32_1D_last_index; +// mod 
argmax_u32_2D_default; +// mod argmax_u32_2D_keepdims_false; +// mod argmax_u32_2D_last_index; +// mod argmax_u32_3D_default; +// mod argmax_u32_3D_keepdims_false; +// mod argmax_u32_3D_last_index; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; 
+// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; +// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod 
cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; +// mod less_equal_i32_broadcast; +// mod less_equal_i8; +// mod less_equal_i8_broadcast; +// mod less_equal_u32; +// mod less_equal_u32_broadcast; +// mod greater_fp16x16; +// mod greater_fp16x16_broadcast; +// mod greater_fp8x23; +// mod greater_fp8x23_broadcast; +// mod greater_i32; +// mod greater_i32_broadcast; +// mod greater_i8; +// mod greater_i8_broadcast; +// mod greater_u32; +// mod greater_u32_broadcast; +// mod leaky_relu_fp16x16; +// mod leaky_relu_fp8x23; +// mod linear_fp16x16; +// mod linear_fp8x23; +// mod linear_i32; +// mod linear_i8; +// mod linear_u32; +// mod log_fp16x16; +// mod log_fp8x23; +// mod logsoftmax_fp16x16_axis_0; +// mod logsoftmax_fp16x16_axis_1; +// mod logsoftmax_fp8x23_axis_0; +// mod logsoftmax_fp8x23_axis_1; +// mod matmul_fp16x16_1d; +// mod matmul_fp16x16_2x2; +// mod matmul_fp16x16_2x1; +// mod matmul_fp16x16_1x2; +// mod matmul_fp8x23_1d; +// mod matmul_fp8x23_2x2; +// mod matmul_fp8x23_2x1; +// mod matmul_fp8x23_1x2; +// mod matmul_i32_1d; +// mod matmul_i32_2x2; +// mod matmul_i32_2x1; +// mod matmul_i32_1x2; 
+// mod matmul_i8_1d; +// mod matmul_i8_2x2; +// mod matmul_i8_2x1; +// mod matmul_i8_1x2; +// mod matmul_u32_1d; +// mod matmul_u32_2x2; +// mod matmul_u32_2x1; +// mod matmul_u32_1x2; +// mod mul_fp16x16; +// mod mul_fp16x16_broadcast; +// mod mul_fp8x23; +// mod mul_fp8x23_broadcast; +// mod mul_i32; +// mod mul_i32_broadcast; +// mod mul_i8; +// mod mul_i8_broadcast; +// mod mul_u32; +// mod mul_u32_broadcast; +// mod or_fp16x16; +// mod or_fp16x16_broadcast; +// mod or_fp8x23; +// mod or_fp8x23_broadcast; +// mod or_i32; +// mod or_i32_broadcast; +// mod or_i8; +// mod or_i8_broadcast; +// mod or_u32; +// mod or_u32_broadcast; +// mod relu_fp16x16; +// mod relu_fp8x23; +// mod relu_i32; +// mod relu_i8; +// mod sigmoid_fp16x16; +// mod sigmoid_fp8x23; +// mod sin_fp16x16; +// mod sin_fp8x23; +// mod sinh_fp16x16; +// mod sinh_fp8x23; +// mod softmax_fp16x16; +// mod softmax_fp8x23; +// mod softplus_fp8x23; +// mod softplus_fp16x16; +// mod softsign_fp8x23; +// mod softsign_fp16x16; +// mod sqrt_fp16x16; +// mod sqrt_fp8x23; +// mod sub_fp16x16; +// mod sub_fp16x16_broadcast; +// mod sub_fp8x23; +// mod sub_fp8x23_broadcast; +// mod sub_i32; +// mod sub_i32_broadcast; +// mod sub_i8; +// mod sub_i8_broadcast; +// mod sub_u32; +// mod sub_u32_broadcast; +// mod tanh_fp16x16; +// mod tanh_fp8x23; +// mod transpose_fp16x16_2d; +// mod transpose_fp16x16_3d; +// mod transpose_fp8x23_2d; +// mod transpose_fp8x23_3d; +// mod transpose_i32_2d; +// mod transpose_i32_3d; +// mod transpose_i8_2d; +// mod transpose_i8_3d; +// mod transpose_u32_2d; +// mod transpose_u32_3d; +// mod xor_fp16x16; +// mod xor_fp16x16_broadcast; +// mod xor_fp8x23; +// mod xor_fp8x23_broadcast; +// mod xor_i32; +// mod xor_i32_broadcast; +// mod xor_i8; +// mod xor_i8_broadcast; +// mod xor_u32; +// mod xor_u32_broadcast; +// mod less_fp16x16; +// mod less_fp16x16_broadcast; +// mod less_fp8x23; +// mod less_fp8x23_broadcast; +// mod less_i32; +// mod less_i32_broadcast; +// mod less_i8; +// 
mod less_i8_broadcast; +// mod less_u32; +// mod less_u32_broadcast; +// mod greater_equal_fp16x16; +// mod greater_equal_fp16x16_broadcast; +// mod greater_equal_fp8x23; +// mod greater_equal_fp8x23_broadcast; +// mod greater_equal_i32; +// mod greater_equal_i32_broadcast; +// mod greater_equal_i8; +// mod greater_equal_i8_broadcast; +// mod greater_equal_u32; +// mod greater_equal_u32_broadcast; +// mod slice_fp16x16_2d; +// mod slice_fp16x16_3d; +// mod slice_fp8x23_2d; +// mod slice_fp8x23_3d; +// mod slice_i32_2d; +// mod slice_i32_3d; +// mod slice_i8_2d; +// mod slice_i8_3d; +// mod slice_u32_2d; +// mod slice_u32_3d; +// mod gather_fp8x23_3d_default; +// mod gather_fp8x23_3d_axis1; +// mod gather_fp8x23_3d_axis2; +// mod gather_fp16x16_3d_default; +// mod gather_fp16x16_3d_axis1; +// mod gather_fp16x16_3d_axis2; +// mod gather_i8_3d_default; +// mod gather_i8_3d_axis1; +// mod gather_i8_3d_axis2; +// mod gather_i32_3d_default; +// mod gather_i32_3d_axis1; +// mod gather_i32_3d_axis2; +// mod gather_u32_3d_default; +// mod gather_u32_3d_axis1; +// mod gather_u32_3d_axis2; +// mod nonzero_fp16x16_2d; +// mod nonzero_fp16x16_3d; +// mod nonzero_fp8x23_2d; +// mod nonzero_fp8x23_3d; +// mod nonzero_i32_2d; +// mod nonzero_i32_3d; +// mod nonzero_i8_2d; +// mod nonzero_i8_3d; +// mod nonzero_u32_2d; +// mod nonzero_u32_3d; +// mod squeeze_fP16x16; +// mod squeeze_fP8x23; +// mod squeeze_i32; +// mod squeeze_i8; +// mod squeeze_u32; +// mod unsqueeze_fp16x16_2d; +// mod unsqueeze_fp16x16_3d; +// mod unsqueeze_fp8x23_2d; +// mod unsqueeze_fp8x23_3d; +// mod unsqueeze_i32_2d; +// mod unsqueeze_i32_3d; +// mod unsqueeze_i8_2d; +// mod unsqueeze_i8_3d; +// mod unsqueeze_u32_2d; +// mod unsqueeze_u32_3d; +// mod sign_fP16x16; +// mod sign_fP8x23; +// mod sign_fail; +// mod sign_i32; +// mod sign_i8; +// mod clip_fp16x16_2d; +// mod clip_fp16x16_3d; +// mod clip_fp8x23_2d; +// mod clip_fp8x23_3d; +// mod clip_i32_2d; +// mod clip_i32_3d; +// mod clip_i8_2d; +// mod 
clip_i8_3d; +// mod clip_u32_2d; +// mod clip_u32_3d; +// mod identity_fP16x16; +// mod identity_fP8x23; +// mod identity_i32; +// mod identity_i8; +// mod identity_u32; +// mod thresholded_relu_fp16x16; +// mod thresholded_relu_fp8x23; +// mod hard_sigmoid_fp8x23; +// mod hard_sigmoid_fp16x16; +// mod neg_fp16x16; +// mod neg_fp8x23; +// mod neg_i32; +// mod neg_i8; +// mod gemm_all_attributes; +// mod gemm_alpha; +// mod gemm_beta; +// mod gemm_default_matrix_bias; +// mod gemm_default_vector_bias; +// mod gemm_default_no_bias; +// mod gemm_transposeA; +// mod gemm_transposeB; +// mod min_fp16x16_three_tensors; +// mod min_fp16x16_broadcast_three_tensors; +// mod min_fp16x16_two_tensors; +// mod min_fp16x16_broadcast_two_tensors; +// mod min_fp8x23_three_tensors; +// mod min_fp8x23_broadcast_three_tensors; +// mod min_fp8x23_two_tensors; +// mod min_fp8x23_broadcast_two_tensors; +// mod min_i32_three_tensors; +// mod min_i32_broadcast_three_tensors; +// mod min_i32_two_tensors; +// mod min_i32_broadcast_two_tensors; +// mod min_i8_three_tensors; +// mod min_i8_broadcast_three_tensors; +// mod min_i8_two_tensors; +// mod min_i8_broadcast_two_tensors; +// mod min_u32_three_tensors; +// mod min_u32_broadcast_three_tensors; +// mod min_u32_two_tensors; +// mod min_u32_broadcast_two_tensors; +// mod where_fp16x16; +// mod where_fp16x16_broadcast; +// mod where_fp8x23; +// mod where_fp8x23_broadcast; +// mod where_i32; +// mod where_i32_broadcast; +// mod where_i8; +// mod where_i8_broadcast; +// mod where_u32; +// mod where_u32_broadcast; +// mod not_bool; +// mod round_fp16x16; +// mod round_fp8x23; +// mod max_fp16x16_three_tensors; +// mod max_fp16x16_broadcast_three_tensors; +// mod max_fp16x16_two_tensors; +// mod max_fp16x16_broadcast_two_tensors; +// mod max_fp8x23_three_tensors; +// mod max_fp8x23_broadcast_three_tensors; +// mod max_fp8x23_two_tensors; +// mod max_fp8x23_broadcast_two_tensors; +// mod max_i32_three_tensors; +// mod 
max_i32_broadcast_three_tensors; +// mod max_i32_two_tensors; +// mod max_i32_broadcast_two_tensors; +// mod max_i8_three_tensors; +// mod max_i8_broadcast_three_tensors; +// mod max_i8_two_tensors; +// mod max_i8_broadcast_two_tensors; +// mod max_u32_three_tensors; +// mod max_u32_broadcast_three_tensors; +// mod max_u32_two_tensors; +// mod max_u32_broadcast_two_tensors; +// mod scatter_fp16x16_3d_default; +// mod scatter_fp16x16_3d_axis1; +// mod scatter_fp16x16_3d_axis1_add; +// mod scatter_fp8x23_default; +// mod scatter_fp8x23_axis1; +// mod scatter_fp8x23_mul; +// mod scatter_i8_default; +// mod scatter_i8_axis1; +// mod scatter_i8_axis1_max; +// mod scatter_u32_default; +// mod scatter_u32_axis1; +// mod scatter_u32_add; +// mod array_feature_extractor_1D_i32; +// mod array_feature_extractor_1D_fp8x23; +// mod array_feature_extractor_1D_fp16x16; +// mod array_feature_extractor_2D_i32; +// mod array_feature_extractor_2D_fp8x23; +// mod array_feature_extractor_2D_fp16x16; +// mod array_feature_extractor_3D_i32; +// mod array_feature_extractor_3D_fp8x23; +// mod array_feature_extractor_3D_fp16x16; +// mod binarizer_fp16x16; +// mod binarizer_fp8x23; +// mod tril_fp16x16; +// mod tril_fp16x16_neg; +// mod tril_fp16x16_one_row; +// mod tril_fp16x16_out_neg; +// mod tril_fp16x16_out_pos; +// mod tril_fp16x16_pos; +// mod tril_fp16x16_square; +// mod tril_fp16x16_square_neg; +// mod tril_fp16x16_zero; +// mod triu_fp16x16; +// mod triu_fp16x16_neg; +// mod triu_fp16x16_one_row; +// mod triu_fp16x16_out_neg; +// mod triu_fp16x16_out_pos; +// mod triu_fp16x16_pos; +// mod triu_fp16x16_square; +// mod triu_fp16x16_square_neg; +// mod triu_fp16x16_zero; +// mod tril_fp8x23; +// mod tril_fp8x23_neg; +// mod tril_fp8x23_one_row; +// mod tril_fp8x23_out_neg; +// mod tril_fp8x23_out_pos; +// mod tril_fp8x23_pos; +// mod tril_fp8x23_square; +// mod tril_fp8x23_square_neg; +// mod tril_fp8x23_zero; +// mod triu_fp8x23; +// mod triu_fp8x23_neg; +// mod triu_fp8x23_one_row; 
+// mod triu_fp8x23_out_neg; +// mod triu_fp8x23_out_pos; +// mod triu_fp8x23_pos; +// mod triu_fp8x23_square; +// mod triu_fp8x23_square_neg; +// mod triu_fp8x23_zero; +// mod tril_i32; +// mod tril_neg_i32; +// mod tril_i32_one_row; +// mod tril_i32_out_neg; +// mod tril_i32_out_pos; +// mod tril_i32_pos; +// mod tril_i32_square; +// mod tril_i32_square_neg; +// mod tril_i32_zero; +// mod triu_i32; +// mod triu_i32_neg; +// mod triu_i32_one_row; +// mod triu_i32_out_neg; +// mod triu_i32_out_pos; +// mod triu_i32_pos; +// mod triu_i32_square; +// mod triu_i32_square_neg; +// mod triu_i32_zero; +// mod tril_i8; +// mod tril_i8_neg; +// mod tril_i8_one_row; +// mod tril_i8_out_neg; +// mod tril_i8_out_pos; +// mod tril_i8_pos; +// mod tril_i8_square; +// mod tril_i8_square_neg; +// mod tril_i8_zero; +// mod triu_i8; +// mod triu_i8_neg; +// mod triu_i8_one_row; +// mod triu_i8_out_neg; +// mod triu_i8_out_pos; +// mod triu_i8_pos; +// mod triu_i8_square; +// mod triu_i8_square_neg; +// mod triu_i8_zero; +// mod tril_u32; +// mod tril_u32_neg; +// mod tril_u32_one_row; +// mod tril_u32_out_neg; +// mod tril_u32_out_pos; +// mod tril_u32_pos; +// mod tril_u32_square; +// mod tril_u32_square_neg; +// mod tril_u32_zero; +// mod triu_u32; +// mod triu_u32_neg; +// mod triu_u32_one_row; +// mod triu_u32_out_neg; +// mod triu_u32_out_pos; +// mod triu_u32_pos; +// mod triu_u32_square; +// mod triu_u32_square_neg; +// mod triu_u32_zero; +// mod reduce_sum_square_fp16x16_export_do_not_keepdims; +// mod reduce_sum_square_fp16x16_export_keepdims; +// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +// mod reduce_sum_square_fp8x23_export_do_not_keepdims; +// mod reduce_sum_square_fp8x23_export_keepdims; +// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +// mod reduce_sum_square_i32_export_do_not_keepdims; +// mod reduce_sum_square_i32_export_keepdims; +// mod reduce_sum_square_i32_export_negative_axes_keepdims; +// mod 
reduce_sum_square_i8_export_do_not_keepdims; +// mod reduce_sum_square_i8_export_keepdims; +// mod reduce_sum_square_i8_export_negative_axes_keepdims; +// mod reduce_sum_square_u32_export_do_not_keepdims; +// mod reduce_sum_square_u32_export_keepdims; +// mod reduce_sum_square_u32_export_negative_axes_keepdims; +// mod reduce_l2_fp16x16_export_do_not_keepdims; +// mod reduce_l2_fp16x16_export_keepdims; +// mod reduce_l2_fp16x16_export_negative_axes_keepdims; +// mod reduce_l2_fp8x23_export_do_not_keepdims; +// mod reduce_l2_fp8x23_export_keepdims; +// mod reduce_l2_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_fp16x16_export_do_not_keepdims; +// mod reduce_l1_fp16x16_export_keepdims; +// mod reduce_l1_fp16x16_export_negative_axes_keepdims; +// mod reduce_l1_fp8x23_export_do_not_keepdims; +// mod reduce_l1_fp8x23_export_keepdims; +// mod reduce_l1_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_i32_export_do_not_keepdims; +// mod reduce_l1_i32_export_keepdims; +// mod reduce_l1_i32_export_negative_axes_keepdims; +// mod reduce_l1_i8_export_do_not_keepdims; +// mod reduce_l1_i8_export_keepdims; +// mod reduce_l1_i8_export_negative_axes_keepdims; +// mod reduce_l1_u32_export_do_not_keepdims; +// mod reduce_l1_u32_export_keepdims; +// mod reduce_l1_u32_export_negative_axes_keepdims; +// mod reduce_prod_fp16x16_1D; +// mod reduce_prod_fp16x16_2D_default; +// mod reduce_prod_fp16x16_2D_keepdims; +// mod reduce_prod_fp16x16_2D_axis_1; +// mod reduce_prod_fp8x23_1D; +// mod reduce_prod_fp8x23_2D_default; +// mod reduce_prod_fp8x23_2D_keepdims; +// mod reduce_prod_fp8x23_2D_axis_1; +// mod reduce_prod_i32_1D; +// mod reduce_prod_i32_2D_default; +// mod reduce_prod_i32_2D_keepdims; +// mod reduce_prod_i32_2D_axis_1; +// mod reduce_prod_i8_1D; +// mod reduce_prod_i8_2D_default; +// mod reduce_prod_i8_2D_keepdims; +// mod reduce_prod_i8_2D_axis_1; +// mod reduce_prod_u32_1D; +// mod reduce_prod_u32_2D_default; +// mod reduce_prod_u32_2D_keepdims; +// mod 
reduce_prod_u32_2D_axis_1; +// mod gather_elements_fp16x16_3d_default; +// mod gather_elements_fp16x16_3d_axis1; +// mod gather_elements_fp16x16_3d_axis2; +// mod gather_elements_fp8x23_3d_default; +// mod gather_elements_fp8x23_3d_axis1; +// mod gather_elements_fp8x23_3d_axis2; +// mod gather_elements_i8_3d_default; +// mod gather_elements_i8_3d_axis1; +// mod gather_elements_i32_3d_default; +// mod gather_elements_i32_3d_axis1; +// mod gather_elements_i32_3d_axis2; +// mod gather_elements_u32_default; +// mod gather_elements_u32_axis1; +// mod gather_elements_u32_axis2; +// mod gather_elements_u32_axis3; +// mod sequence_length_fp16x16; +// mod sequence_length_fp16x16_broadcast; +// mod sequence_length_fp8x23; +// mod sequence_length_fp8x23_broadcast; +// mod sequence_length_i32; +// mod sequence_length_i32_broadcast; +// mod sequence_length_i8; +// mod sequence_length_i8_broadcast; +// mod sequence_length_u32; +// mod sequence_length_u32_broadcast; +// mod sequence_at_u32_positive; +// mod sequence_at_u32_negative; +// mod sequence_at_fp16x16_positive; +// mod sequence_at_fp16x16_negative; +// mod sequence_at_fp8x23_positive; +// mod sequence_at_fp8x23_negative; +// mod sequence_at_i32_positive; +// mod sequence_at_i32_negative; +// mod sequence_at_i8_positive; +// mod sequence_at_i8_negative; +// mod reduce_min_fp16x16_1D; +// mod reduce_min_fp16x16_2D_default; +// mod reduce_min_fp16x16_2D_keepdims; +// mod reduce_min_fp16x16_2D_axis_1; +// mod reduce_min_fp8x23_1D; +// mod reduce_min_fp8x23_2D_default; +// mod reduce_min_fp8x23_2D_keepdims; +// mod reduce_min_fp8x23_2D_axis_1; +// mod reduce_min_i32_1D; +// mod reduce_min_i32_2D_default; +// mod reduce_min_i32_2D_keepdims; +// mod reduce_min_i32_2D_axis_1; +// mod reduce_min_i8_1D; +// mod reduce_min_i8_2D_default; +// mod reduce_min_i8_2D_keepdims; +// mod reduce_min_i8_2D_axis_1; +// mod reduce_min_u32_1D; +// mod reduce_min_u32_2D_default; +// mod reduce_min_u32_2D_keepdims; +// mod 
reduce_min_u32_2D_axis_1; +// mod sequence_construct_fp16x16; +// mod sequence_construct_fp8x23; +// mod sequence_construct_i32; +// mod sequence_construct_i8; +// mod sequence_construct_u32; +// mod shrink_hard_fp16x16; +// mod shrink_soft_fp16x16; +// mod shrink_hard_fp8x23; +// mod shrink_soft_fp8x23; +// mod sequence_empty_fp16x16; +// mod sequence_empty_fp8x23; +// mod sequence_empty_i32; +// mod sequence_empty_i8; +// mod sequence_empty_u32; +// mod reduce_mean_fp16x16_1D; +// mod reduce_mean_fp16x16_2D_default; +// mod reduce_mean_fp16x16_2D_keepdims; +// mod reduce_mean_fp16x16_2D_axis_1; +// mod reduce_mean_fp8x23_1D; +// mod reduce_mean_fp8x23_2D_default; +// mod reduce_mean_fp8x23_2D_keepdims; +// mod reduce_mean_fp8x23_2D_axis_1; +// mod reduce_mean_i32_1D; +// mod reduce_mean_i32_2D_default; +// mod reduce_mean_i32_2D_keepdims; +// mod reduce_mean_i32_2D_axis_1; +// mod reduce_mean_i8_1D; +// mod reduce_mean_i8_2D_default; +// mod reduce_mean_i8_2D_keepdims; +// mod reduce_mean_i8_2D_axis_1; +// mod reduce_mean_u32_1D; +// mod reduce_mean_u32_2D_default; +// mod reduce_mean_u32_2D_keepdims; +// mod reduce_mean_u32_2D_axis_1; +// mod pow_fp16x16; +// mod pow_fp16x16_broadcast; +// mod pow_fp8x23; +// mod pow_fp8x23_broadcast; +// mod sequence_erase_u32_positive; +// mod sequence_erase_u32_negative; +// mod sequence_erase_u32_empty; +// mod sequence_erase_fp16x16_positive; +// mod sequence_erase_fp16x16_negative; +// mod sequence_erase_fp16x16_empty; +// mod sequence_erase_fp8x23_positive; +// mod sequence_erase_fp8x23_negative; +// mod sequence_erase_fp8x23_empty; +// mod sequence_erase_i32_positive; +// mod sequence_erase_i32_negative; +// mod sequence_erase_i32_empty; +// mod sequence_erase_i8_positive; +// mod sequence_erase_i8_negative; +// mod sequence_erase_i8_empty; +// mod sequence_insert_fp16x16; +// mod sequence_insert_fp8x23; +// mod sequence_insert_i32; +// mod sequence_insert_i8; +// mod sequence_insert_u32; +// mod 
concat_from_sequence_fp8x23_new_axis_zero; +// mod concat_from_sequence_fp8x23_new_axis_one; +// mod concat_from_sequence_fp8x23_new_axis_default; +// mod concat_from_sequence_fp16x16_new_axis_zero; +// mod concat_from_sequence_fp16x16_new_axis_one; +// mod concat_from_sequence_fp16x16_new_axis_default; +// mod concat_from_sequence_i32_new_axis_zero; +// mod concat_from_sequence_i32_new_axis_one; +// mod concat_from_sequence_i32_new_axis_default; +// mod concat_from_sequence_i8_new_axis_zero; +// mod concat_from_sequence_i8_new_axis_one; +// mod concat_from_sequence_i8_new_axis_default; +// mod concat_from_sequence_u32_new_axis_zero; +// mod concat_from_sequence_u32_new_axis_one; +// mod concat_from_sequence_u32_new_axis_default; +// // mod is_nan_fp16x16; +// // mod is_inf_i32; +// // mod is_pos_inf_i32; +// // mod is_neg_inf_i32; +// mod reduce_log_sum_fp8x23_export_do_not_keepdims; +// mod reduce_log_sum_fp8x23_export_keepdims; +// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +// mod reduce_log_sum_fp16x16_export_do_not_keepdims; +// mod reduce_log_sum_fp16x16_export_keepdims; +// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +// mod and_bool; +// mod erf_fp16x16; +// mod erf_fp8x23; +// mod unique_fp16x16_without_axis_sorted; +// mod unique_fp16x16_with_axis_zero_sorted; +// mod unique_u32_without_axis_sorted; +// mod unique_u32_without_axis_not_sorted; +// mod unique_u32_with_axis_zero_sorted; +// mod unique_u32_with_axis_zero_not_sorted; +// mod unique_u32_with_axis_one_sorted; +// mod unique_u32_with_axis_one_not_sorted; +// mod gather_nd_fp16x16_3d_default; +// mod gather_nd_fp16x16_3d_batch_dims1; +// mod gather_nd_fp16x16_3d_batch_dims2; +// mod gather_nd_fp8x23_3d_default; +// mod gather_nd_fp8x23_3d_batch_dims1; +// mod gather_nd_fp8x23_3d_batch_dims2; +// mod gather_nd_i32_3d_default; +// mod gather_nd_i32_3d_batch_dims1; +// mod gather_nd_i32_3d_batch_dims2; +// mod gather_nd_i8_3d_default; +// mod gather_nd_i8_3d_batch_dims1; 
+// mod gather_nd_u32_default; +// mod gather_nd_u32_batch_dims1; +// mod gather_nd_u32_batch_dims2; +// mod resize_upsample_scales_nearest; +// mod resize_downsample_scales_cubic; +// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_downsample_scales_cubic_align_corners; +// mod resize_upsample_scales_linear; +// mod resize_downsample_scales_linear_align_corners; +// mod resize_downsample_scales_nearest; +// mod resize_upsample_scales_cubic; +// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_upsample_scales_cubic_align_corners; +// mod resize_upsample_scales_cubic_asymmetric; +// mod resize_upsample_scales_linear_align_corners; +// mod resize_upsample_sizes_nearest; +// mod resize_upsample_sizes_cubic; +// mod resize_downsample_sizes_cubic; +// mod resize_downsample_sizes_nearest; +// mod resize_upsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_scales_cubic_antialias; +// mod resize_downsample_scales_linear_antialias; +// mod resize_downsample_sizes_cubic_antialias; +// mod resize_downsample_sizes_linear_pytorch_half_pixel; +// mod resize_tf_crop_and_resize; +// mod resize_tf_crop_and_resize_extrapolation_value; +// mod resize_upsample_scales_nearest_axes_2_3; +// mod resize_upsample_scales_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_2_3; +// mod resize_upsample_sizes_nearest_ceil_half_pixel; +// mod resize_upsample_sizes_nearest_floor_align_corners; +// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +// mod resize_downsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_sizes_nearest_not_larger; +// mod resize_downsample_sizes_nearest_not_smaller; +// mod resize_tf_crop_and_resize_axes_2_3; +// mod resize_tf_crop_and_resize_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_not_larger; +// mod resize_upsample_sizes_nearest_not_smaller; +// mod compress_fp16x16_3d_default; +// mod 
compress_fp16x16_3d_axis1; +// mod compress_fp16x16_3d_axis2; +// mod compress_fp16x16_3d_axis3; +// mod compress_fp16x16_3d_noaxis; +// mod compress_fp8x23_3d_default; +// mod compress_fp8x23_3d_axis1; +// mod compress_fp8x23_3d_axis2; +// mod compress_i32_3d_default; +// mod compress_i32_3d_axis1; +// mod compress_i32_3d_axis2; +// mod compress_i8_3d_default; +// mod compress_i8_3d_axis1; +// mod compress_i8_3d_axis2; +// mod compress_u32_3d_default; +// mod compress_u32_3d_axis1; +// mod compress_u32_3d_axis2; +// mod compress_u32_3d_axis2_2; +// mod compress_u32_3d_axis3; +// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +// mod layer_normalization_default_axis; +// mod layer_normalization_4d_axis0; +// mod layer_normalization_4d_axis1; +// mod layer_normalization_4d_axis2; +// mod layer_normalization_4d_axis3; +// mod layer_normalization_3d_axis0_epsilon; +// mod layer_normalization_3d_axis_negative_3_epsilon; +// mod layer_normalization_3d_axis1_epsilon; +// mod layer_normalization_3d_axis2_epsilon; +// mod layer_normalization_4d_axis_negative_4; +// mod layer_normalization_4d_axis_negative_3; +// mod layer_normalization_4d_axis_negative_2; +// mod layer_normalization_4d_axis_negative_1; +// mod layer_normalization_3d_axis_negative_2_epsilon; +// mod layer_normalization_3d_axis_negative_1_epsilon; +// mod layer_normalization_test; +// mod split_u32_1d_equal_parts; +// mod split_u32_2d_equal_parts; +// mod split_u32_zero_size; +// mod split_u32_1d_variable_parts; +// mod split_u32_2d_variable_parts; +// mod split_u32_1d_uneven; +// mod split_u32_2d_uneven; +// mod split_fp16x16_1d_equal_parts; +// mod split_fp16x16_1d_variable_parts; +// mod split_fp16x16_2d_equal_parts; +// mod split_fp16x16_2d_variable_parts; +// mod split_fp16x16_zero_size; +// mod split_fp16x16_1d_uneven; +// mod split_fp16x16_2d_uneven; +// mod grid_sample; +// 
mod grid_sample_cubic; +// mod grid_sample_aligncorners; +// mod grid_sample_nearest; +// mod grid_sample_nearest_aligncorner; +// mod grid_sample_padding_border; +// mod grid_sample_padding_reflection; +// mod grid_sample_padding_zeros; +// mod col2im; +// mod col2im_5D; +// mod col2im_dilations; +// mod col2im_pads; +// mod col2im_strides; +// mod random_uniform_like_fp16x16; +// mod random_uniform_like_fp8x23; +// mod range_fp8x23; +// mod range_fp16x16; +// mod range_i32; +// mod range_i8; +// mod range_u32; +// mod hann_window_fp8x23; +// mod hann_window_fp16x16; +// mod hamming_window_fp16x16; +// mod hamming_window_fp8x23; +// mod blackman_window_fp16x16; +// mod blackman_window_fp8x23; +// mod split_to_sequence_fp16x16_1d_equal_parts; +// mod split_to_sequence_fp16x16_1d_variable_parts; +// mod split_to_sequence_fp16x16_2d_equal_parts; +// mod split_to_sequence_fp16x16_2d_variable_parts; +// mod split_to_sequence_fp16x16_zero_size; +// mod split_to_sequence_fp16x16_1d_uneven; +// mod split_to_sequence_fp16x16_2d_uneven; +// mod split_to_sequence_u32_1d_equal_parts; +// mod split_to_sequence_u32_1d_variable_parts; +// mod split_to_sequence_u32_2d_equal_parts; +// mod split_to_sequence_u32_2d_variable_parts; +// mod split_to_sequence_u32_zero_size; +// mod split_to_sequence_u32_1d_uneven; +// mod split_to_sequence_u32_2d_uneven; +// mod split_to_sequence_2d_scalar; +// mod split_to_sequence_2d_nokeepdims; +// mod split_to_sequence_1d_nokeepdims; +// mod reverse_sequence_fp16x16_batch_equal_parts; +// mod reverse_sequence_fp16x16_time_equal_parts; +// mod reverse_sequence_i32_batch_equal_parts; +// mod reverse_sequence_i32_time_equal_parts; +// mod reverse_sequence_i8_batch_equal_parts; +// mod reverse_sequence_i8_time_equal_parts; +// mod reverse_sequence_u32_4x4_batch; +// mod reverse_sequence_u32_4x4_time; +// mod reverse_sequence_u32_3x3_batch; +// mod reverse_sequence_u32_3x3_time; +// mod reverse_sequence_different_dimensions_4_5; +// mod 
reverse_sequence_different_dimensions_2_4; +// mod reverse_sequence_different_dimensions_1_6; +// mod reverse_sequence_different_dimensions_3x9_batch; +// mod reverse_sequence_different_dimensions_3x9_time; +// mod conv_transpose; +// mod conv_transpose_1d; +// mod conv_transpose_3d; +// mod conv_transpose_attributes; +// mod conv_transpose_autopad_same; +// mod conv_transpose_dilations; +// mod conv_transpose_pads; +// mod conv_transpose_group_2; +// mod conv_transpose_group_2_image_3; +// mod depth_to_space_fp16x16; +// mod depth_to_space_fp8x23; +// mod depth_to_space_i32; +// mod depth_to_space_i8; +// mod depth_to_space_u32; +// mod space_to_depth_fp16x16; +// mod space_to_depth_fp8x23; +// mod space_to_depth_i32; +// mod space_to_depth_i8; +// mod space_to_depth_u32; +// mod scatter_nd_fp16x16_3d_default; +// mod scatter_nd_fp16x16_3d_add; +// mod scatter_nd_fp16x16_3d_mul; +// mod scatter_nd_fp16x16_3d_max; +// mod scatter_nd_fp16x16_3d_min; +// mod scatter_nd_fp8x23_3d_default; +// mod scatter_nd_fp8x23_3d_add; +// mod scatter_nd_fp8x23_3d_mul; +// mod scatter_nd_fp8x23_3d_max; +// mod scatter_nd_fp8x23_3d_min; +// mod scatter_nd_u32_default; +// mod scatter_nd_u32_add; +// mod scatter_nd_u32_mul; +// mod scatter_nd_u32_max; +// mod scatter_nd_u32_min; +// mod conv_2D_with_padding; +// mod conv_1D_no_padding; +// mod conv_1D_with_padding; +// mod conv_3D_no_padding; +// mod conv_3D_with_padding; +// mod conv_4D_no_padding; +// mod conv_2D_with_2_groups; +// mod conv_2D_with_autopad_same; +// mod conv_2D_with_strides_asymmetric_padding; +// mod conv_2D_with_strides_with_padding; +// mod conv_4D_with_padding; +// mod label_encoder_fp16x16_3d_default; +// mod label_encoder_fp8x23_default; +// mod label_encoder_i8_default; +// mod label_encoder_i32_default; +// mod label_encoder_u32_default; +// mod reduce_sum_single_axis_fp16x16_1D; +// mod reduce_sum_single_axis_fp16x16_2D_default; +// mod reduce_sum_single_axis_fp16x16_2D_keepdims; +// mod 
reduce_sum_single_axis_fp16x16_2D_axis_1; +// mod reduce_sum_single_axis_fp8x23_1D; +// mod reduce_sum_single_axis_fp8x23_2D_default; +// mod reduce_sum_single_axis_fp8x23_2D_keepdims; +// mod reduce_sum_single_axis_fp8x23_2D_axis_1; +// mod reduce_sum_single_axis_i32_1D; +// mod reduce_sum_single_axis_i32_2D_default; +// mod reduce_sum_single_axis_i32_2D_keepdims; +// mod reduce_sum_single_axis_i32_2D_axis_1; +// mod reduce_sum_single_axis_i8_1D; +// mod reduce_sum_single_axis_i8_2D_default; +// mod reduce_sum_single_axis_i8_2D_keepdims; +// mod reduce_sum_single_axis_i8_2D_axis_1; +// mod reduce_sum_single_axis_u32_1D; +// mod reduce_sum_single_axis_u32_2D_default; +// mod reduce_sum_single_axis_u32_2D_keepdims; +// mod reduce_sum_single_axis_u32_2D_axis_1; +// mod reduce_sum_keep_dims; +// mod reduce_sum_no_keep_dims; +// mod reduce_sum_default_axes_keepdims; +// mod reduce_sum_empty_axes_input_noop; +// mod and_bool_broadcast; From 54a0568db3e8ad535737025b77d76b8b45050fd8 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 09:11:08 +0100 Subject: [PATCH 26/68] test reshape --- nodegen/node/reshape.py | 120 + src/operators/tensor/core.cairo | 5 + tests/lib.cairo | 12 +- tests/nodes.cairo | 2095 +++++++++-------- tests/nodes/reshape_extended_dims.cairo | 20 + .../nodes/reshape_extended_dims/input_0.cairo | 38 + .../reshape_extended_dims/output_0.cairo | 39 + tests/nodes/reshape_negative_dim.cairo | 20 + .../nodes/reshape_negative_dim/input_0.cairo | 38 + .../nodes/reshape_negative_dim/output_0.cairo | 38 + .../reshape_negative_extended_dims.cairo | 20 + .../input_0.cairo | 38 + .../output_0.cairo | 39 + tests/nodes/reshape_one_dim.cairo | 20 + tests/nodes/reshape_one_dim/input_0.cairo | 38 + tests/nodes/reshape_one_dim/output_0.cairo | 36 + tests/nodes/reshape_reduced_dims.cairo | 20 + .../nodes/reshape_reduced_dims/input_0.cairo | 38 + .../nodes/reshape_reduced_dims/output_0.cairo | 37 + tests/nodes/reshape_reordered_all_dims.cairo | 20 + 
.../reshape_reordered_all_dims/input_0.cairo | 38 + .../reshape_reordered_all_dims/output_0.cairo | 38 + tests/nodes/reshape_reordered_last_dims.cairo | 20 + .../reshape_reordered_last_dims/input_0.cairo | 38 + .../output_0.cairo | 38 + .../nodes/reshape_zero_and_negative_dim.cairo | 20 + .../input_0.cairo | 38 + .../output_0.cairo | 39 + tests/nodes/reshape_zero_dim.cairo | 20 + tests/nodes/reshape_zero_dim/input_0.cairo | 38 + tests/nodes/reshape_zero_dim/output_0.cairo | 39 + 31 files changed, 2048 insertions(+), 1049 deletions(-) create mode 100644 nodegen/node/reshape.py create mode 100644 tests/nodes/reshape_extended_dims.cairo create mode 100644 tests/nodes/reshape_extended_dims/input_0.cairo create mode 100644 tests/nodes/reshape_extended_dims/output_0.cairo create mode 100644 tests/nodes/reshape_negative_dim.cairo create mode 100644 tests/nodes/reshape_negative_dim/input_0.cairo create mode 100644 tests/nodes/reshape_negative_dim/output_0.cairo create mode 100644 tests/nodes/reshape_negative_extended_dims.cairo create mode 100644 tests/nodes/reshape_negative_extended_dims/input_0.cairo create mode 100644 tests/nodes/reshape_negative_extended_dims/output_0.cairo create mode 100644 tests/nodes/reshape_one_dim.cairo create mode 100644 tests/nodes/reshape_one_dim/input_0.cairo create mode 100644 tests/nodes/reshape_one_dim/output_0.cairo create mode 100644 tests/nodes/reshape_reduced_dims.cairo create mode 100644 tests/nodes/reshape_reduced_dims/input_0.cairo create mode 100644 tests/nodes/reshape_reduced_dims/output_0.cairo create mode 100644 tests/nodes/reshape_reordered_all_dims.cairo create mode 100644 tests/nodes/reshape_reordered_all_dims/input_0.cairo create mode 100644 tests/nodes/reshape_reordered_all_dims/output_0.cairo create mode 100644 tests/nodes/reshape_reordered_last_dims.cairo create mode 100644 tests/nodes/reshape_reordered_last_dims/input_0.cairo create mode 100644 tests/nodes/reshape_reordered_last_dims/output_0.cairo create mode 100644 
tests/nodes/reshape_zero_and_negative_dim.cairo create mode 100644 tests/nodes/reshape_zero_and_negative_dim/input_0.cairo create mode 100644 tests/nodes/reshape_zero_and_negative_dim/output_0.cairo create mode 100644 tests/nodes/reshape_zero_dim.cairo create mode 100644 tests/nodes/reshape_zero_dim/input_0.cairo create mode 100644 tests/nodes/reshape_zero_dim/output_0.cairo diff --git a/nodegen/node/reshape.py b/nodegen/node/reshape.py new file mode 100644 index 000000000..e2601cf14 --- /dev/null +++ b/nodegen/node/reshape.py @@ -0,0 +1,120 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, Tensor, Dtype + +original_shape = [2, 3, 4] +data = np.random.random_sample(original_shape).astype(np.int32) + + +def reshape_reference_implementation( + data: np.ndarray, shape: np.ndarray, allowzero: int = 0 +) -> np.ndarray: + # replace zeros with corresponding dim size + # we need to do this because np.reshape doesn't support 0 by default unless 'allowzero' is set + new_shape = np.copy(shape) + if allowzero == 0: + zeros_index = np.where(shape == 0) + new_shape[zeros_index] = np.array(data.shape)[zeros_index] + reshaped = np.reshape(data, new_shape) + return reshaped + + +class Reshape(RunAll): + @staticmethod + def reshape_reordered_all_dims(): + y = reshape_reference_implementation( + data, np.array([4, 2, 3], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_reordered_all_dims" + make_test([x], y, "input_0.reshape(array![4,2,3].span())", name) + + @staticmethod + def reshape_reordered_last_dims(): + y = reshape_reference_implementation( + data, np.array([2, 4, 3], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_reordered_last_dims" + make_test([x], y, "input_0.reshape(array![2,4,3].span())", name) + + @staticmethod + def reshape_reduced_dims(): + y = 
reshape_reference_implementation( + data, np.array([2, 12], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_reduced_dims" + make_test([x], y, "input_0.reshape(array![2,12].span())", name) + + @staticmethod + def reshape_extended_dims(): + y = reshape_reference_implementation( + data, np.array([2, 3, 2, 2], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_extended_dims" + make_test([x], y, "input_0.reshape(array![2, 3, 2, 2].span())", name) + + @staticmethod + def reshape_one_dim(): + y = reshape_reference_implementation( + data, np.array([24], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_one_dim" + make_test([x], y, "input_0.reshape(array![24].span())", name) + + @staticmethod + def reshape_negative_dim(): + y = reshape_reference_implementation( + data, np.array([2, -1, 2], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_negative_dim" + make_test([x], y, "input_0.reshape(array![2, -1, 2].span())", name) + + @staticmethod + def reshape_negative_extended_dims(): + y = reshape_reference_implementation( + data, np.array([-1, 2, 3, 4], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_negative_extended_dims" + make_test([x], y, "input_0.reshape(array![-1, 2, 3, 4].span())", name) + + @staticmethod + def reshape_zero_dim(): + y = reshape_reference_implementation( + data, np.array([2, 0, 4, 1], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_zero_dim" + make_test([x], y, "input_0.reshape(array![2, 0, 4, 1].span())", name) + + @staticmethod + def 
reshape_zero_and_negative_dim(): + y = reshape_reference_implementation( + data, np.array([2, 0, 1, -1], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_zero_and_negative_dim" + make_test([x], y, "input_0.reshape(array![2, 0, 1, -1].span())", name) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index af20cb446..45c5782e4 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -6032,6 +6032,7 @@ fn reshape>>(self: @Tensor, target_shape: Span) -> Te }, Option::None => { break; } }; + i+=1; }; let mut target_shape_clone = target_shape.clone(); @@ -6043,6 +6044,9 @@ fn reshape>>(self: @Tensor, target_shape: Span) -> Te if *dim == -1 { inferred_shape.append(total_elements / elements_so_far) // Inferred dimension } else if *dim == 0 { + if i >= (*self.shape).len() { + panic!("Dimension out of bounds for using original dimension value"); + } inferred_shape.append(*(*self).shape.at(i)) // Dimension unchanged from original } else { inferred_shape.append((*dim).try_into().unwrap()) @@ -6050,6 +6054,7 @@ fn reshape>>(self: @Tensor, target_shape: Span) -> Te }, Option::None => { break; } } + i+=1; }; new_tensor(inferred_shape.span(), *self.data) diff --git a/tests/lib.cairo b/tests/lib.cairo index eb58139db..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -// mod numbers; -// mod performance; -// mod tensor_core; -// mod nodes; -// mod ml; -// mod operators; +mod numbers; +mod performance; +mod tensor_core; +mod nodes; +mod ml; +mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 9d921be62..836741819 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1043 +1,1052 @@ -// mod abs_fp16x16; -// mod abs_fp8x23; -// mod abs_i32; -// mod abs_i8; -// mod acos_fp16x16; -// mod acos_fp8x23; -// mod acosh_fp16x16; -// mod acosh_fp8x23; -// mod add_fp16x16; -// mod 
add_fp16x16_broadcast; -// mod add_fp8x23; -// mod add_fp8x23_broadcast; -// mod add_i32; -// mod add_i32_broadcast; -// mod add_i8; -// mod add_i8_broadcast; -// mod add_u32; -// mod add_u32_broadcast; -// mod argmax_fp16x16_1D_default; -// mod argmax_fp16x16_1D_keepdims_false; -// mod argmax_fp16x16_1D_last_index; -// mod argmax_fp16x16_2D_default; -// mod argmax_fp16x16_2D_keepdims_false; -// mod argmax_fp16x16_2D_last_index; -// mod argmax_fp16x16_3D_default; -// mod argmax_fp16x16_3D_keepdims_false; -// mod argmax_fp16x16_3D_last_index; -// mod argmax_fp8x23_1D_default; -// mod argmax_fp8x23_1D_keepdims_false; -// mod argmax_fp8x23_1D_last_index; -// mod argmax_fp8x23_2D_default; -// mod argmax_fp8x23_2D_keepdims_false; -// mod argmax_fp8x23_2D_last_index; -// mod argmax_fp8x23_3D_default; -// mod argmax_fp8x23_3D_keepdims_false; -// mod argmax_fp8x23_3D_last_index; -// mod argmax_i32_1D_default; -// mod argmax_i32_1D_keepdims_false; -// mod argmax_i32_1D_last_index; -// mod argmax_i32_2D_default; -// mod argmax_i32_2D_keepdims_false; -// mod argmax_i32_2D_last_index; -// mod argmax_i32_3D_default; -// mod argmax_i32_3D_keepdims_false; -// mod argmax_i32_3D_last_index; -// mod argmax_i8_1D_default; -// mod argmax_i8_1D_keepdims_false; -// mod argmax_i8_1D_last_index; -// mod argmax_i8_2D_default; -// mod argmax_i8_2D_keepdims_false; -// mod argmax_i8_2D_last_index; -// mod argmax_i8_3D_default; -// mod argmax_i8_3D_keepdims_false; -// mod argmax_i8_3D_last_index; -// mod argmax_u32_1D_default; -// mod argmax_u32_1D_keepdims_false; -// mod argmax_u32_1D_last_index; -// mod argmax_u32_2D_default; -// mod argmax_u32_2D_keepdims_false; -// mod argmax_u32_2D_last_index; -// mod argmax_u32_3D_default; -// mod argmax_u32_3D_keepdims_false; -// mod argmax_u32_3D_last_index; -// mod argmin_fp16x16_1D_default; -// mod argmin_fp16x16_1D_keepdims_false; -// mod argmin_fp16x16_1D_last_index; -// mod argmin_fp16x16_2D_default; -// mod argmin_fp16x16_2D_keepdims_false; -// 
mod argmin_fp16x16_2D_last_index; -// mod argmin_fp16x16_3D_default; -// mod argmin_fp16x16_3D_keepdims_false; -// mod argmin_fp16x16_3D_last_index; -// mod argmin_fp8x23_1D_default; -// mod argmin_fp8x23_1D_keepdims_false; -// mod argmin_fp8x23_1D_last_index; -// mod argmin_fp8x23_2D_default; -// mod argmin_fp8x23_2D_keepdims_false; -// mod argmin_fp8x23_2D_last_index; -// mod argmin_fp8x23_3D_default; -// mod argmin_fp8x23_3D_keepdims_false; -// mod argmin_fp8x23_3D_last_index; -// mod argmin_i32_1D_default; -// mod argmin_i32_1D_keepdims_false; -// mod argmin_i32_1D_last_index; -// mod argmin_i32_2D_default; -// mod argmin_i32_2D_keepdims_false; -// mod argmin_i32_2D_last_index; -// mod argmin_i32_3D_default; -// mod argmin_i32_3D_keepdims_false; -// mod argmin_i32_3D_last_index; -// mod argmin_i8_1D_default; -// mod argmin_i8_1D_keepdims_false; -// mod argmin_i8_1D_last_index; -// mod argmin_i8_2D_default; -// mod argmin_i8_2D_keepdims_false; -// mod argmin_i8_2D_last_index; -// mod argmin_i8_3D_default; -// mod argmin_i8_3D_keepdims_false; -// mod argmin_i8_3D_last_index; -// mod argmin_u32_1D_default; -// mod argmin_u32_1D_keepdims_false; -// mod argmin_u32_1D_last_index; -// mod argmin_u32_2D_default; -// mod argmin_u32_2D_keepdims_false; -// mod argmin_u32_2D_last_index; -// mod argmin_u32_3D_default; -// mod argmin_u32_3D_keepdims_false; -// mod argmin_u32_3D_last_index; -// mod asin_fp16x16; -// mod asin_fp8x23; -// mod asinh_fp16x16; -// mod asinh_fp8x23; -// mod atan_fp16x16; -// mod atan_fp8x23; -// mod ceil_fp16x16; -// mod ceil_fp8x23; -// mod concat_fp16x16_1d; -// mod concat_fp16x16_2d; -// mod concat_fp16x16_3d_default; -// mod concat_fp16x16_3d_axis_1; -// mod concat_fp16x16_3d_axis_2; -// mod concat_fp16x16_3d_three_tensors_axis_1; -// mod concat_fp16x16_3d_three_tensors_axis_2; -// mod concat_fp8x23_1d; -// mod concat_fp8x23_2d; -// mod concat_fp8x23_3d_default; -// mod concat_fp8x23_3d_axis_1; -// mod concat_fp8x23_3d_axis_2; -// mod 
concat_fp8x23_3d_three_tensors_axis_1; -// mod concat_fp8x23_3d_three_tensors_axis_2; -// mod concat_i32_1d; -// mod concat_i32_2d; -// mod concat_i32_3d_default; -// mod concat_i32_3d_axis_1; -// mod concat_i32_3d_axis_2; -// mod concat_i32_3d_three_tensors_axis_1; -// mod concat_i32_3d_three_tensors_axis_2; -// mod concat_i8_1d; -// mod concat_i8_2d; -// mod concat_i8_3d_default; -// mod concat_i8_3d_axis_1; -// mod concat_i8_3d_axis_2; -// mod concat_i8_3d_three_tensors_axis_1; -// mod concat_i8_3d_three_tensors_axis_2; -// mod concat_u32_1d; -// mod concat_u32_2d; -// mod concat_u32_3d_default; -// mod concat_u32_3d_axis_1; -// mod concat_u32_3d_axis_2; -// mod concat_u32_3d_three_tensors_axis_1; -// mod concat_u32_3d_three_tensors_axis_2; -// mod cos_fp16x16; -// mod cos_fp8x23; -// mod cosh_fp16x16; -// mod cosh_fp8x23; -// mod cumsum_fp16x16_1d_default; -// mod cumsum_fp16x16_1d_exclusive; -// mod cumsum_fp16x16_1d_reverse; -// mod cumsum_fp16x16_1d_reverse_exclusive; -// mod cumsum_fp16x16_2d_axis_0; -// mod cumsum_fp16x16_2d_axis_1; -// mod cumsum_fp8x23_1d_default; -// mod cumsum_fp8x23_1d_exclusive; -// mod cumsum_fp8x23_1d_reverse; -// mod cumsum_fp8x23_1d_reverse_exclusive; -// mod cumsum_fp8x23_2d_axis_0; -// mod cumsum_fp8x23_2d_axis_1; -// mod cumsum_i32_1d_default; -// mod cumsum_i32_1d_exclusive; -// mod cumsum_i32_1d_reverse; -// mod cumsum_i32_1d_reverse_exclusive; -// mod cumsum_i32_2d_axis_0; -// mod cumsum_i32_2d_axis_1; -// mod cumsum_i8_1d_default; -// mod cumsum_i8_1d_exclusive; -// mod cumsum_i8_1d_reverse; -// mod cumsum_i8_1d_reverse_exclusive; -// mod cumsum_i8_2d_axis_0; -// mod cumsum_i8_2d_axis_1; -// mod cumsum_u32_1d_default; -// mod cumsum_u32_1d_exclusive; -// mod cumsum_u32_1d_reverse; -// mod cumsum_u32_1d_reverse_exclusive; -// mod cumsum_u32_2d_axis_0; -// mod cumsum_u32_2d_axis_1; -// mod div_fp16x16; -// mod div_fp16x16_broadcast; -// mod div_fp8x23; -// mod div_fp8x23_broadcast; -// mod div_i32; -// mod div_i32_broadcast; 
-// mod div_i8; -// mod div_i8_broadcast; -// mod div_u32; -// mod div_u32_broadcast; -// mod equal_fp16x16; -// mod equal_fp16x16_broadcast; -// mod equal_fp8x23; -// mod equal_fp8x23_broadcast; -// mod equal_i32; -// mod equal_i32_broadcast; -// mod equal_i8; -// mod equal_i8_broadcast; -// mod equal_u32; -// mod equal_u32_broadcast; -// mod exp_fp16x16; -// mod exp_fp8x23; -// mod less_equal_fp16x16; -// mod less_equal_fp16x16_broadcast; -// mod less_equal_fp8x23; -// mod less_equal_fp8x23_broadcast; -// mod less_equal_i32; -// mod less_equal_i32_broadcast; -// mod less_equal_i8; -// mod less_equal_i8_broadcast; -// mod less_equal_u32; -// mod less_equal_u32_broadcast; -// mod greater_fp16x16; -// mod greater_fp16x16_broadcast; -// mod greater_fp8x23; -// mod greater_fp8x23_broadcast; -// mod greater_i32; -// mod greater_i32_broadcast; -// mod greater_i8; -// mod greater_i8_broadcast; -// mod greater_u32; -// mod greater_u32_broadcast; -// mod leaky_relu_fp16x16; -// mod leaky_relu_fp8x23; -// mod linear_fp16x16; -// mod linear_fp8x23; -// mod linear_i32; -// mod linear_i8; -// mod linear_u32; -// mod log_fp16x16; -// mod log_fp8x23; -// mod logsoftmax_fp16x16_axis_0; -// mod logsoftmax_fp16x16_axis_1; -// mod logsoftmax_fp8x23_axis_0; -// mod logsoftmax_fp8x23_axis_1; -// mod matmul_fp16x16_1d; -// mod matmul_fp16x16_2x2; -// mod matmul_fp16x16_2x1; -// mod matmul_fp16x16_1x2; -// mod matmul_fp8x23_1d; -// mod matmul_fp8x23_2x2; -// mod matmul_fp8x23_2x1; -// mod matmul_fp8x23_1x2; -// mod matmul_i32_1d; -// mod matmul_i32_2x2; -// mod matmul_i32_2x1; -// mod matmul_i32_1x2; -// mod matmul_i8_1d; -// mod matmul_i8_2x2; -// mod matmul_i8_2x1; -// mod matmul_i8_1x2; -// mod matmul_u32_1d; -// mod matmul_u32_2x2; -// mod matmul_u32_2x1; -// mod matmul_u32_1x2; -// mod mul_fp16x16; -// mod mul_fp16x16_broadcast; -// mod mul_fp8x23; -// mod mul_fp8x23_broadcast; -// mod mul_i32; -// mod mul_i32_broadcast; -// mod mul_i8; -// mod mul_i8_broadcast; -// mod mul_u32; 
-// mod mul_u32_broadcast; -// mod or_fp16x16; -// mod or_fp16x16_broadcast; -// mod or_fp8x23; -// mod or_fp8x23_broadcast; -// mod or_i32; -// mod or_i32_broadcast; -// mod or_i8; -// mod or_i8_broadcast; -// mod or_u32; -// mod or_u32_broadcast; -// mod relu_fp16x16; -// mod relu_fp8x23; -// mod relu_i32; -// mod relu_i8; -// mod sigmoid_fp16x16; -// mod sigmoid_fp8x23; -// mod sin_fp16x16; -// mod sin_fp8x23; -// mod sinh_fp16x16; -// mod sinh_fp8x23; -// mod softmax_fp16x16; -// mod softmax_fp8x23; -// mod softplus_fp8x23; -// mod softplus_fp16x16; -// mod softsign_fp8x23; -// mod softsign_fp16x16; -// mod sqrt_fp16x16; -// mod sqrt_fp8x23; -// mod sub_fp16x16; -// mod sub_fp16x16_broadcast; -// mod sub_fp8x23; -// mod sub_fp8x23_broadcast; -// mod sub_i32; -// mod sub_i32_broadcast; -// mod sub_i8; -// mod sub_i8_broadcast; -// mod sub_u32; -// mod sub_u32_broadcast; -// mod tanh_fp16x16; -// mod tanh_fp8x23; -// mod transpose_fp16x16_2d; -// mod transpose_fp16x16_3d; -// mod transpose_fp8x23_2d; -// mod transpose_fp8x23_3d; -// mod transpose_i32_2d; -// mod transpose_i32_3d; -// mod transpose_i8_2d; -// mod transpose_i8_3d; -// mod transpose_u32_2d; -// mod transpose_u32_3d; -// mod xor_fp16x16; -// mod xor_fp16x16_broadcast; -// mod xor_fp8x23; -// mod xor_fp8x23_broadcast; -// mod xor_i32; -// mod xor_i32_broadcast; -// mod xor_i8; -// mod xor_i8_broadcast; -// mod xor_u32; -// mod xor_u32_broadcast; -// mod less_fp16x16; -// mod less_fp16x16_broadcast; -// mod less_fp8x23; -// mod less_fp8x23_broadcast; -// mod less_i32; -// mod less_i32_broadcast; -// mod less_i8; -// mod less_i8_broadcast; -// mod less_u32; -// mod less_u32_broadcast; -// mod greater_equal_fp16x16; -// mod greater_equal_fp16x16_broadcast; -// mod greater_equal_fp8x23; -// mod greater_equal_fp8x23_broadcast; -// mod greater_equal_i32; -// mod greater_equal_i32_broadcast; -// mod greater_equal_i8; -// mod greater_equal_i8_broadcast; -// mod greater_equal_u32; -// mod 
greater_equal_u32_broadcast; -// mod slice_fp16x16_2d; -// mod slice_fp16x16_3d; -// mod slice_fp8x23_2d; -// mod slice_fp8x23_3d; -// mod slice_i32_2d; -// mod slice_i32_3d; -// mod slice_i8_2d; -// mod slice_i8_3d; -// mod slice_u32_2d; -// mod slice_u32_3d; -// mod gather_fp8x23_3d_default; -// mod gather_fp8x23_3d_axis1; -// mod gather_fp8x23_3d_axis2; -// mod gather_fp16x16_3d_default; -// mod gather_fp16x16_3d_axis1; -// mod gather_fp16x16_3d_axis2; -// mod gather_i8_3d_default; -// mod gather_i8_3d_axis1; -// mod gather_i8_3d_axis2; -// mod gather_i32_3d_default; -// mod gather_i32_3d_axis1; -// mod gather_i32_3d_axis2; -// mod gather_u32_3d_default; -// mod gather_u32_3d_axis1; -// mod gather_u32_3d_axis2; -// mod nonzero_fp16x16_2d; -// mod nonzero_fp16x16_3d; -// mod nonzero_fp8x23_2d; -// mod nonzero_fp8x23_3d; -// mod nonzero_i32_2d; -// mod nonzero_i32_3d; -// mod nonzero_i8_2d; -// mod nonzero_i8_3d; -// mod nonzero_u32_2d; -// mod nonzero_u32_3d; -// mod squeeze_fP16x16; -// mod squeeze_fP8x23; -// mod squeeze_i32; -// mod squeeze_i8; -// mod squeeze_u32; -// mod unsqueeze_fp16x16_2d; -// mod unsqueeze_fp16x16_3d; -// mod unsqueeze_fp8x23_2d; -// mod unsqueeze_fp8x23_3d; -// mod unsqueeze_i32_2d; -// mod unsqueeze_i32_3d; -// mod unsqueeze_i8_2d; -// mod unsqueeze_i8_3d; -// mod unsqueeze_u32_2d; -// mod unsqueeze_u32_3d; -// mod sign_fP16x16; -// mod sign_fP8x23; -// mod sign_fail; -// mod sign_i32; -// mod sign_i8; -// mod clip_fp16x16_2d; -// mod clip_fp16x16_3d; -// mod clip_fp8x23_2d; -// mod clip_fp8x23_3d; -// mod clip_i32_2d; -// mod clip_i32_3d; -// mod clip_i8_2d; -// mod clip_i8_3d; -// mod clip_u32_2d; -// mod clip_u32_3d; -// mod identity_fP16x16; -// mod identity_fP8x23; -// mod identity_i32; -// mod identity_i8; -// mod identity_u32; -// mod thresholded_relu_fp16x16; -// mod thresholded_relu_fp8x23; -// mod hard_sigmoid_fp8x23; -// mod hard_sigmoid_fp16x16; -// mod neg_fp16x16; -// mod neg_fp8x23; -// mod neg_i32; -// mod neg_i8; -// 
mod gemm_all_attributes; -// mod gemm_alpha; -// mod gemm_beta; -// mod gemm_default_matrix_bias; -// mod gemm_default_vector_bias; -// mod gemm_default_no_bias; -// mod gemm_transposeA; -// mod gemm_transposeB; -// mod min_fp16x16_three_tensors; -// mod min_fp16x16_broadcast_three_tensors; -// mod min_fp16x16_two_tensors; -// mod min_fp16x16_broadcast_two_tensors; -// mod min_fp8x23_three_tensors; -// mod min_fp8x23_broadcast_three_tensors; -// mod min_fp8x23_two_tensors; -// mod min_fp8x23_broadcast_two_tensors; -// mod min_i32_three_tensors; -// mod min_i32_broadcast_three_tensors; -// mod min_i32_two_tensors; -// mod min_i32_broadcast_two_tensors; -// mod min_i8_three_tensors; -// mod min_i8_broadcast_three_tensors; -// mod min_i8_two_tensors; -// mod min_i8_broadcast_two_tensors; -// mod min_u32_three_tensors; -// mod min_u32_broadcast_three_tensors; -// mod min_u32_two_tensors; -// mod min_u32_broadcast_two_tensors; -// mod where_fp16x16; -// mod where_fp16x16_broadcast; -// mod where_fp8x23; -// mod where_fp8x23_broadcast; -// mod where_i32; -// mod where_i32_broadcast; -// mod where_i8; -// mod where_i8_broadcast; -// mod where_u32; -// mod where_u32_broadcast; -// mod not_bool; -// mod round_fp16x16; -// mod round_fp8x23; -// mod max_fp16x16_three_tensors; -// mod max_fp16x16_broadcast_three_tensors; -// mod max_fp16x16_two_tensors; -// mod max_fp16x16_broadcast_two_tensors; -// mod max_fp8x23_three_tensors; -// mod max_fp8x23_broadcast_three_tensors; -// mod max_fp8x23_two_tensors; -// mod max_fp8x23_broadcast_two_tensors; -// mod max_i32_three_tensors; -// mod max_i32_broadcast_three_tensors; -// mod max_i32_two_tensors; -// mod max_i32_broadcast_two_tensors; -// mod max_i8_three_tensors; -// mod max_i8_broadcast_three_tensors; -// mod max_i8_two_tensors; -// mod max_i8_broadcast_two_tensors; -// mod max_u32_three_tensors; -// mod max_u32_broadcast_three_tensors; -// mod max_u32_two_tensors; -// mod max_u32_broadcast_two_tensors; -// mod 
scatter_fp16x16_3d_default; -// mod scatter_fp16x16_3d_axis1; -// mod scatter_fp16x16_3d_axis1_add; -// mod scatter_fp8x23_default; -// mod scatter_fp8x23_axis1; -// mod scatter_fp8x23_mul; -// mod scatter_i8_default; -// mod scatter_i8_axis1; -// mod scatter_i8_axis1_max; -// mod scatter_u32_default; -// mod scatter_u32_axis1; -// mod scatter_u32_add; -// mod array_feature_extractor_1D_i32; -// mod array_feature_extractor_1D_fp8x23; -// mod array_feature_extractor_1D_fp16x16; -// mod array_feature_extractor_2D_i32; -// mod array_feature_extractor_2D_fp8x23; -// mod array_feature_extractor_2D_fp16x16; -// mod array_feature_extractor_3D_i32; -// mod array_feature_extractor_3D_fp8x23; -// mod array_feature_extractor_3D_fp16x16; -// mod binarizer_fp16x16; -// mod binarizer_fp8x23; -// mod tril_fp16x16; -// mod tril_fp16x16_neg; -// mod tril_fp16x16_one_row; -// mod tril_fp16x16_out_neg; -// mod tril_fp16x16_out_pos; -// mod tril_fp16x16_pos; -// mod tril_fp16x16_square; -// mod tril_fp16x16_square_neg; -// mod tril_fp16x16_zero; -// mod triu_fp16x16; -// mod triu_fp16x16_neg; -// mod triu_fp16x16_one_row; -// mod triu_fp16x16_out_neg; -// mod triu_fp16x16_out_pos; -// mod triu_fp16x16_pos; -// mod triu_fp16x16_square; -// mod triu_fp16x16_square_neg; -// mod triu_fp16x16_zero; -// mod tril_fp8x23; -// mod tril_fp8x23_neg; -// mod tril_fp8x23_one_row; -// mod tril_fp8x23_out_neg; -// mod tril_fp8x23_out_pos; -// mod tril_fp8x23_pos; -// mod tril_fp8x23_square; -// mod tril_fp8x23_square_neg; -// mod tril_fp8x23_zero; -// mod triu_fp8x23; -// mod triu_fp8x23_neg; -// mod triu_fp8x23_one_row; -// mod triu_fp8x23_out_neg; -// mod triu_fp8x23_out_pos; -// mod triu_fp8x23_pos; -// mod triu_fp8x23_square; -// mod triu_fp8x23_square_neg; -// mod triu_fp8x23_zero; -// mod tril_i32; -// mod tril_neg_i32; -// mod tril_i32_one_row; -// mod tril_i32_out_neg; -// mod tril_i32_out_pos; -// mod tril_i32_pos; -// mod tril_i32_square; -// mod tril_i32_square_neg; -// mod tril_i32_zero; 
-// mod triu_i32; -// mod triu_i32_neg; -// mod triu_i32_one_row; -// mod triu_i32_out_neg; -// mod triu_i32_out_pos; -// mod triu_i32_pos; -// mod triu_i32_square; -// mod triu_i32_square_neg; -// mod triu_i32_zero; -// mod tril_i8; -// mod tril_i8_neg; -// mod tril_i8_one_row; -// mod tril_i8_out_neg; -// mod tril_i8_out_pos; -// mod tril_i8_pos; -// mod tril_i8_square; -// mod tril_i8_square_neg; -// mod tril_i8_zero; -// mod triu_i8; -// mod triu_i8_neg; -// mod triu_i8_one_row; -// mod triu_i8_out_neg; -// mod triu_i8_out_pos; -// mod triu_i8_pos; -// mod triu_i8_square; -// mod triu_i8_square_neg; -// mod triu_i8_zero; -// mod tril_u32; -// mod tril_u32_neg; -// mod tril_u32_one_row; -// mod tril_u32_out_neg; -// mod tril_u32_out_pos; -// mod tril_u32_pos; -// mod tril_u32_square; -// mod tril_u32_square_neg; -// mod tril_u32_zero; -// mod triu_u32; -// mod triu_u32_neg; -// mod triu_u32_one_row; -// mod triu_u32_out_neg; -// mod triu_u32_out_pos; -// mod triu_u32_pos; -// mod triu_u32_square; -// mod triu_u32_square_neg; -// mod triu_u32_zero; -// mod reduce_sum_square_fp16x16_export_do_not_keepdims; -// mod reduce_sum_square_fp16x16_export_keepdims; -// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -// mod reduce_sum_square_fp8x23_export_do_not_keepdims; -// mod reduce_sum_square_fp8x23_export_keepdims; -// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -// mod reduce_sum_square_i32_export_do_not_keepdims; -// mod reduce_sum_square_i32_export_keepdims; -// mod reduce_sum_square_i32_export_negative_axes_keepdims; -// mod reduce_sum_square_i8_export_do_not_keepdims; -// mod reduce_sum_square_i8_export_keepdims; -// mod reduce_sum_square_i8_export_negative_axes_keepdims; -// mod reduce_sum_square_u32_export_do_not_keepdims; -// mod reduce_sum_square_u32_export_keepdims; -// mod reduce_sum_square_u32_export_negative_axes_keepdims; -// mod reduce_l2_fp16x16_export_do_not_keepdims; -// mod reduce_l2_fp16x16_export_keepdims; -// mod 
reduce_l2_fp16x16_export_negative_axes_keepdims; -// mod reduce_l2_fp8x23_export_do_not_keepdims; -// mod reduce_l2_fp8x23_export_keepdims; -// mod reduce_l2_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_fp16x16_export_do_not_keepdims; -// mod reduce_l1_fp16x16_export_keepdims; -// mod reduce_l1_fp16x16_export_negative_axes_keepdims; -// mod reduce_l1_fp8x23_export_do_not_keepdims; -// mod reduce_l1_fp8x23_export_keepdims; -// mod reduce_l1_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_i32_export_do_not_keepdims; -// mod reduce_l1_i32_export_keepdims; -// mod reduce_l1_i32_export_negative_axes_keepdims; -// mod reduce_l1_i8_export_do_not_keepdims; -// mod reduce_l1_i8_export_keepdims; -// mod reduce_l1_i8_export_negative_axes_keepdims; -// mod reduce_l1_u32_export_do_not_keepdims; -// mod reduce_l1_u32_export_keepdims; -// mod reduce_l1_u32_export_negative_axes_keepdims; -// mod reduce_prod_fp16x16_1D; -// mod reduce_prod_fp16x16_2D_default; -// mod reduce_prod_fp16x16_2D_keepdims; -// mod reduce_prod_fp16x16_2D_axis_1; -// mod reduce_prod_fp8x23_1D; -// mod reduce_prod_fp8x23_2D_default; -// mod reduce_prod_fp8x23_2D_keepdims; -// mod reduce_prod_fp8x23_2D_axis_1; -// mod reduce_prod_i32_1D; -// mod reduce_prod_i32_2D_default; -// mod reduce_prod_i32_2D_keepdims; -// mod reduce_prod_i32_2D_axis_1; -// mod reduce_prod_i8_1D; -// mod reduce_prod_i8_2D_default; -// mod reduce_prod_i8_2D_keepdims; -// mod reduce_prod_i8_2D_axis_1; -// mod reduce_prod_u32_1D; -// mod reduce_prod_u32_2D_default; -// mod reduce_prod_u32_2D_keepdims; -// mod reduce_prod_u32_2D_axis_1; -// mod gather_elements_fp16x16_3d_default; -// mod gather_elements_fp16x16_3d_axis1; -// mod gather_elements_fp16x16_3d_axis2; -// mod gather_elements_fp8x23_3d_default; -// mod gather_elements_fp8x23_3d_axis1; -// mod gather_elements_fp8x23_3d_axis2; -// mod gather_elements_i8_3d_default; -// mod gather_elements_i8_3d_axis1; -// mod gather_elements_i32_3d_default; -// mod 
gather_elements_i32_3d_axis1; -// mod gather_elements_i32_3d_axis2; -// mod gather_elements_u32_default; -// mod gather_elements_u32_axis1; -// mod gather_elements_u32_axis2; -// mod gather_elements_u32_axis3; -// mod sequence_length_fp16x16; -// mod sequence_length_fp16x16_broadcast; -// mod sequence_length_fp8x23; -// mod sequence_length_fp8x23_broadcast; -// mod sequence_length_i32; -// mod sequence_length_i32_broadcast; -// mod sequence_length_i8; -// mod sequence_length_i8_broadcast; -// mod sequence_length_u32; -// mod sequence_length_u32_broadcast; -// mod sequence_at_u32_positive; -// mod sequence_at_u32_negative; -// mod sequence_at_fp16x16_positive; -// mod sequence_at_fp16x16_negative; -// mod sequence_at_fp8x23_positive; -// mod sequence_at_fp8x23_negative; -// mod sequence_at_i32_positive; -// mod sequence_at_i32_negative; -// mod sequence_at_i8_positive; -// mod sequence_at_i8_negative; -// mod reduce_min_fp16x16_1D; -// mod reduce_min_fp16x16_2D_default; -// mod reduce_min_fp16x16_2D_keepdims; -// mod reduce_min_fp16x16_2D_axis_1; -// mod reduce_min_fp8x23_1D; -// mod reduce_min_fp8x23_2D_default; -// mod reduce_min_fp8x23_2D_keepdims; -// mod reduce_min_fp8x23_2D_axis_1; -// mod reduce_min_i32_1D; -// mod reduce_min_i32_2D_default; -// mod reduce_min_i32_2D_keepdims; -// mod reduce_min_i32_2D_axis_1; -// mod reduce_min_i8_1D; -// mod reduce_min_i8_2D_default; -// mod reduce_min_i8_2D_keepdims; -// mod reduce_min_i8_2D_axis_1; -// mod reduce_min_u32_1D; -// mod reduce_min_u32_2D_default; -// mod reduce_min_u32_2D_keepdims; -// mod reduce_min_u32_2D_axis_1; -// mod sequence_construct_fp16x16; -// mod sequence_construct_fp8x23; -// mod sequence_construct_i32; -// mod sequence_construct_i8; -// mod sequence_construct_u32; -// mod shrink_hard_fp16x16; -// mod shrink_soft_fp16x16; -// mod shrink_hard_fp8x23; -// mod shrink_soft_fp8x23; -// mod sequence_empty_fp16x16; -// mod sequence_empty_fp8x23; -// mod sequence_empty_i32; -// mod sequence_empty_i8; -// 
mod sequence_empty_u32; -// mod reduce_mean_fp16x16_1D; -// mod reduce_mean_fp16x16_2D_default; -// mod reduce_mean_fp16x16_2D_keepdims; -// mod reduce_mean_fp16x16_2D_axis_1; -// mod reduce_mean_fp8x23_1D; -// mod reduce_mean_fp8x23_2D_default; -// mod reduce_mean_fp8x23_2D_keepdims; -// mod reduce_mean_fp8x23_2D_axis_1; -// mod reduce_mean_i32_1D; -// mod reduce_mean_i32_2D_default; -// mod reduce_mean_i32_2D_keepdims; -// mod reduce_mean_i32_2D_axis_1; -// mod reduce_mean_i8_1D; -// mod reduce_mean_i8_2D_default; -// mod reduce_mean_i8_2D_keepdims; -// mod reduce_mean_i8_2D_axis_1; -// mod reduce_mean_u32_1D; -// mod reduce_mean_u32_2D_default; -// mod reduce_mean_u32_2D_keepdims; -// mod reduce_mean_u32_2D_axis_1; -// mod pow_fp16x16; -// mod pow_fp16x16_broadcast; -// mod pow_fp8x23; -// mod pow_fp8x23_broadcast; -// mod sequence_erase_u32_positive; -// mod sequence_erase_u32_negative; -// mod sequence_erase_u32_empty; -// mod sequence_erase_fp16x16_positive; -// mod sequence_erase_fp16x16_negative; -// mod sequence_erase_fp16x16_empty; -// mod sequence_erase_fp8x23_positive; -// mod sequence_erase_fp8x23_negative; -// mod sequence_erase_fp8x23_empty; -// mod sequence_erase_i32_positive; -// mod sequence_erase_i32_negative; -// mod sequence_erase_i32_empty; -// mod sequence_erase_i8_positive; -// mod sequence_erase_i8_negative; -// mod sequence_erase_i8_empty; -// mod sequence_insert_fp16x16; -// mod sequence_insert_fp8x23; -// mod sequence_insert_i32; -// mod sequence_insert_i8; -// mod sequence_insert_u32; -// mod concat_from_sequence_fp8x23_new_axis_zero; -// mod concat_from_sequence_fp8x23_new_axis_one; -// mod concat_from_sequence_fp8x23_new_axis_default; -// mod concat_from_sequence_fp16x16_new_axis_zero; -// mod concat_from_sequence_fp16x16_new_axis_one; -// mod concat_from_sequence_fp16x16_new_axis_default; -// mod concat_from_sequence_i32_new_axis_zero; -// mod concat_from_sequence_i32_new_axis_one; -// mod concat_from_sequence_i32_new_axis_default; 
-// mod concat_from_sequence_i8_new_axis_zero; -// mod concat_from_sequence_i8_new_axis_one; -// mod concat_from_sequence_i8_new_axis_default; -// mod concat_from_sequence_u32_new_axis_zero; -// mod concat_from_sequence_u32_new_axis_one; -// mod concat_from_sequence_u32_new_axis_default; -// // mod is_nan_fp16x16; -// // mod is_inf_i32; -// // mod is_pos_inf_i32; -// // mod is_neg_inf_i32; -// mod reduce_log_sum_fp8x23_export_do_not_keepdims; -// mod reduce_log_sum_fp8x23_export_keepdims; -// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -// mod reduce_log_sum_fp16x16_export_do_not_keepdims; -// mod reduce_log_sum_fp16x16_export_keepdims; -// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -// mod and_bool; -// mod erf_fp16x16; -// mod erf_fp8x23; -// mod unique_fp16x16_without_axis_sorted; -// mod unique_fp16x16_with_axis_zero_sorted; -// mod unique_u32_without_axis_sorted; -// mod unique_u32_without_axis_not_sorted; -// mod unique_u32_with_axis_zero_sorted; -// mod unique_u32_with_axis_zero_not_sorted; -// mod unique_u32_with_axis_one_sorted; -// mod unique_u32_with_axis_one_not_sorted; -// mod gather_nd_fp16x16_3d_default; -// mod gather_nd_fp16x16_3d_batch_dims1; -// mod gather_nd_fp16x16_3d_batch_dims2; -// mod gather_nd_fp8x23_3d_default; -// mod gather_nd_fp8x23_3d_batch_dims1; -// mod gather_nd_fp8x23_3d_batch_dims2; -// mod gather_nd_i32_3d_default; -// mod gather_nd_i32_3d_batch_dims1; -// mod gather_nd_i32_3d_batch_dims2; -// mod gather_nd_i8_3d_default; -// mod gather_nd_i8_3d_batch_dims1; -// mod gather_nd_u32_default; -// mod gather_nd_u32_batch_dims1; -// mod gather_nd_u32_batch_dims2; -// mod resize_upsample_scales_nearest; -// mod resize_downsample_scales_cubic; -// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_downsample_scales_cubic_align_corners; -// mod resize_upsample_scales_linear; -// mod resize_downsample_scales_linear_align_corners; -// mod resize_downsample_scales_nearest; -// mod 
resize_upsample_scales_cubic; -// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_upsample_scales_cubic_align_corners; -// mod resize_upsample_scales_cubic_asymmetric; -// mod resize_upsample_scales_linear_align_corners; -// mod resize_upsample_sizes_nearest; -// mod resize_upsample_sizes_cubic; -// mod resize_downsample_sizes_cubic; -// mod resize_downsample_sizes_nearest; -// mod resize_upsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_scales_cubic_antialias; -// mod resize_downsample_scales_linear_antialias; -// mod resize_downsample_sizes_cubic_antialias; -// mod resize_downsample_sizes_linear_pytorch_half_pixel; -// mod resize_tf_crop_and_resize; -// mod resize_tf_crop_and_resize_extrapolation_value; -// mod resize_upsample_scales_nearest_axes_2_3; -// mod resize_upsample_scales_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_2_3; -// mod resize_upsample_sizes_nearest_ceil_half_pixel; -// mod resize_upsample_sizes_nearest_floor_align_corners; -// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -// mod resize_downsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_sizes_nearest_not_larger; -// mod resize_downsample_sizes_nearest_not_smaller; -// mod resize_tf_crop_and_resize_axes_2_3; -// mod resize_tf_crop_and_resize_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_not_larger; -// mod resize_upsample_sizes_nearest_not_smaller; -// mod compress_fp16x16_3d_default; -// mod compress_fp16x16_3d_axis1; -// mod compress_fp16x16_3d_axis2; -// mod compress_fp16x16_3d_axis3; -// mod compress_fp16x16_3d_noaxis; -// mod compress_fp8x23_3d_default; -// mod compress_fp8x23_3d_axis1; -// mod compress_fp8x23_3d_axis2; -// mod compress_i32_3d_default; -// mod compress_i32_3d_axis1; -// mod compress_i32_3d_axis2; -// mod compress_i8_3d_default; -// mod compress_i8_3d_axis1; -// mod compress_i8_3d_axis2; -// mod compress_u32_3d_default; -// 
mod compress_u32_3d_axis1; -// mod compress_u32_3d_axis2; -// mod compress_u32_3d_axis2_2; -// mod compress_u32_3d_axis3; -// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -// mod layer_normalization_default_axis; -// mod layer_normalization_4d_axis0; -// mod layer_normalization_4d_axis1; -// mod layer_normalization_4d_axis2; -// mod layer_normalization_4d_axis3; -// mod layer_normalization_3d_axis0_epsilon; -// mod layer_normalization_3d_axis_negative_3_epsilon; -// mod layer_normalization_3d_axis1_epsilon; -// mod layer_normalization_3d_axis2_epsilon; -// mod layer_normalization_4d_axis_negative_4; -// mod layer_normalization_4d_axis_negative_3; -// mod layer_normalization_4d_axis_negative_2; -// mod layer_normalization_4d_axis_negative_1; -// mod layer_normalization_3d_axis_negative_2_epsilon; -// mod layer_normalization_3d_axis_negative_1_epsilon; -// mod layer_normalization_test; -// mod split_u32_1d_equal_parts; -// mod split_u32_2d_equal_parts; -// mod split_u32_zero_size; -// mod split_u32_1d_variable_parts; -// mod split_u32_2d_variable_parts; -// mod split_u32_1d_uneven; -// mod split_u32_2d_uneven; -// mod split_fp16x16_1d_equal_parts; -// mod split_fp16x16_1d_variable_parts; -// mod split_fp16x16_2d_equal_parts; -// mod split_fp16x16_2d_variable_parts; -// mod split_fp16x16_zero_size; -// mod split_fp16x16_1d_uneven; -// mod split_fp16x16_2d_uneven; -// mod grid_sample; -// mod grid_sample_cubic; -// mod grid_sample_aligncorners; -// mod grid_sample_nearest; -// mod grid_sample_nearest_aligncorner; -// mod grid_sample_padding_border; -// mod grid_sample_padding_reflection; -// mod grid_sample_padding_zeros; -// mod col2im; -// mod col2im_5D; -// mod col2im_dilations; -// mod col2im_pads; -// mod col2im_strides; -// mod random_uniform_like_fp16x16; -// mod random_uniform_like_fp8x23; -// mod range_fp8x23; -// mod range_fp16x16; 
-// mod range_i32; -// mod range_i8; -// mod range_u32; -// mod hann_window_fp8x23; -// mod hann_window_fp16x16; -// mod hamming_window_fp16x16; -// mod hamming_window_fp8x23; -// mod blackman_window_fp16x16; -// mod blackman_window_fp8x23; -// mod split_to_sequence_fp16x16_1d_equal_parts; -// mod split_to_sequence_fp16x16_1d_variable_parts; -// mod split_to_sequence_fp16x16_2d_equal_parts; -// mod split_to_sequence_fp16x16_2d_variable_parts; -// mod split_to_sequence_fp16x16_zero_size; -// mod split_to_sequence_fp16x16_1d_uneven; -// mod split_to_sequence_fp16x16_2d_uneven; -// mod split_to_sequence_u32_1d_equal_parts; -// mod split_to_sequence_u32_1d_variable_parts; -// mod split_to_sequence_u32_2d_equal_parts; -// mod split_to_sequence_u32_2d_variable_parts; -// mod split_to_sequence_u32_zero_size; -// mod split_to_sequence_u32_1d_uneven; -// mod split_to_sequence_u32_2d_uneven; -// mod split_to_sequence_2d_scalar; -// mod split_to_sequence_2d_nokeepdims; -// mod split_to_sequence_1d_nokeepdims; -// mod reverse_sequence_fp16x16_batch_equal_parts; -// mod reverse_sequence_fp16x16_time_equal_parts; -// mod reverse_sequence_i32_batch_equal_parts; -// mod reverse_sequence_i32_time_equal_parts; -// mod reverse_sequence_i8_batch_equal_parts; -// mod reverse_sequence_i8_time_equal_parts; -// mod reverse_sequence_u32_4x4_batch; -// mod reverse_sequence_u32_4x4_time; -// mod reverse_sequence_u32_3x3_batch; -// mod reverse_sequence_u32_3x3_time; -// mod reverse_sequence_different_dimensions_4_5; -// mod reverse_sequence_different_dimensions_2_4; -// mod reverse_sequence_different_dimensions_1_6; -// mod reverse_sequence_different_dimensions_3x9_batch; -// mod reverse_sequence_different_dimensions_3x9_time; -// mod conv_transpose; -// mod conv_transpose_1d; -// mod conv_transpose_3d; -// mod conv_transpose_attributes; -// mod conv_transpose_autopad_same; -// mod conv_transpose_dilations; -// mod conv_transpose_pads; -// mod conv_transpose_group_2; -// mod 
conv_transpose_group_2_image_3; -// mod depth_to_space_fp16x16; -// mod depth_to_space_fp8x23; -// mod depth_to_space_i32; -// mod depth_to_space_i8; -// mod depth_to_space_u32; -// mod space_to_depth_fp16x16; -// mod space_to_depth_fp8x23; -// mod space_to_depth_i32; -// mod space_to_depth_i8; -// mod space_to_depth_u32; -// mod scatter_nd_fp16x16_3d_default; -// mod scatter_nd_fp16x16_3d_add; -// mod scatter_nd_fp16x16_3d_mul; -// mod scatter_nd_fp16x16_3d_max; -// mod scatter_nd_fp16x16_3d_min; -// mod scatter_nd_fp8x23_3d_default; -// mod scatter_nd_fp8x23_3d_add; -// mod scatter_nd_fp8x23_3d_mul; -// mod scatter_nd_fp8x23_3d_max; -// mod scatter_nd_fp8x23_3d_min; -// mod scatter_nd_u32_default; -// mod scatter_nd_u32_add; -// mod scatter_nd_u32_mul; -// mod scatter_nd_u32_max; -// mod scatter_nd_u32_min; -// mod conv_2D_with_padding; -// mod conv_1D_no_padding; -// mod conv_1D_with_padding; -// mod conv_3D_no_padding; -// mod conv_3D_with_padding; -// mod conv_4D_no_padding; -// mod conv_2D_with_2_groups; -// mod conv_2D_with_autopad_same; -// mod conv_2D_with_strides_asymmetric_padding; -// mod conv_2D_with_strides_with_padding; -// mod conv_4D_with_padding; -// mod label_encoder_fp16x16_3d_default; -// mod label_encoder_fp8x23_default; -// mod label_encoder_i8_default; -// mod label_encoder_i32_default; -// mod label_encoder_u32_default; -// mod reduce_sum_single_axis_fp16x16_1D; -// mod reduce_sum_single_axis_fp16x16_2D_default; -// mod reduce_sum_single_axis_fp16x16_2D_keepdims; -// mod reduce_sum_single_axis_fp16x16_2D_axis_1; -// mod reduce_sum_single_axis_fp8x23_1D; -// mod reduce_sum_single_axis_fp8x23_2D_default; -// mod reduce_sum_single_axis_fp8x23_2D_keepdims; -// mod reduce_sum_single_axis_fp8x23_2D_axis_1; -// mod reduce_sum_single_axis_i32_1D; -// mod reduce_sum_single_axis_i32_2D_default; -// mod reduce_sum_single_axis_i32_2D_keepdims; -// mod reduce_sum_single_axis_i32_2D_axis_1; -// mod reduce_sum_single_axis_i8_1D; -// mod 
reduce_sum_single_axis_i8_2D_default; -// mod reduce_sum_single_axis_i8_2D_keepdims; -// mod reduce_sum_single_axis_i8_2D_axis_1; -// mod reduce_sum_single_axis_u32_1D; -// mod reduce_sum_single_axis_u32_2D_default; -// mod reduce_sum_single_axis_u32_2D_keepdims; -// mod reduce_sum_single_axis_u32_2D_axis_1; -// mod reduce_sum_keep_dims; -// mod reduce_sum_no_keep_dims; -// mod reduce_sum_default_axes_keepdims; -// mod reduce_sum_empty_axes_input_noop; -// mod and_bool_broadcast; +mod abs_fp16x16; +mod abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmax_fp16x16_1D_default; +mod argmax_fp16x16_1D_keepdims_false; +mod argmax_fp16x16_1D_last_index; +mod argmax_fp16x16_2D_default; +mod argmax_fp16x16_2D_keepdims_false; +mod argmax_fp16x16_2D_last_index; +mod argmax_fp16x16_3D_default; +mod argmax_fp16x16_3D_keepdims_false; +mod argmax_fp16x16_3D_last_index; +mod argmax_fp8x23_1D_default; +mod argmax_fp8x23_1D_keepdims_false; +mod argmax_fp8x23_1D_last_index; +mod argmax_fp8x23_2D_default; +mod argmax_fp8x23_2D_keepdims_false; +mod argmax_fp8x23_2D_last_index; +mod argmax_fp8x23_3D_default; +mod argmax_fp8x23_3D_keepdims_false; +mod argmax_fp8x23_3D_last_index; +mod argmax_i32_1D_default; +mod argmax_i32_1D_keepdims_false; +mod argmax_i32_1D_last_index; +mod argmax_i32_2D_default; +mod argmax_i32_2D_keepdims_false; +mod argmax_i32_2D_last_index; +mod argmax_i32_3D_default; +mod argmax_i32_3D_keepdims_false; +mod argmax_i32_3D_last_index; +mod argmax_i8_1D_default; +mod argmax_i8_1D_keepdims_false; +mod argmax_i8_1D_last_index; +mod argmax_i8_2D_default; +mod argmax_i8_2D_keepdims_false; +mod argmax_i8_2D_last_index; +mod argmax_i8_3D_default; +mod argmax_i8_3D_keepdims_false; +mod 
argmax_i8_3D_last_index; +mod argmax_u32_1D_default; +mod argmax_u32_1D_keepdims_false; +mod argmax_u32_1D_last_index; +mod argmax_u32_2D_default; +mod argmax_u32_2D_keepdims_false; +mod argmax_u32_2D_last_index; +mod argmax_u32_3D_default; +mod argmax_u32_3D_keepdims_false; +mod argmax_u32_3D_last_index; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod 
concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; +mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod 
cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; 
+mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softmax_fp16x16; +mod softmax_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; +mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod gather_fp8x23_3d_default; +mod gather_fp8x23_3d_axis1; +mod gather_fp8x23_3d_axis2; +mod 
gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; +mod gather_i8_3d_default; +mod gather_i8_3d_axis1; +mod gather_i8_3d_axis2; +mod gather_i32_3d_default; +mod gather_i32_3d_axis1; +mod gather_i32_3d_axis2; +mod gather_u32_3d_default; +mod gather_u32_3d_axis1; +mod gather_u32_3d_axis2; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; +mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod 
min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; 
+mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; +mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod 
reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; 
+mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod gather_elements_fp16x16_3d_default; +mod gather_elements_fp16x16_3d_axis1; +mod gather_elements_fp16x16_3d_axis2; +mod gather_elements_fp8x23_3d_default; +mod gather_elements_fp8x23_3d_axis1; +mod gather_elements_fp8x23_3d_axis2; +mod gather_elements_i8_3d_default; +mod gather_elements_i8_3d_axis1; +mod gather_elements_i32_3d_default; +mod gather_elements_i32_3d_axis1; +mod gather_elements_i32_3d_axis2; +mod gather_elements_u32_default; +mod gather_elements_u32_axis1; +mod gather_elements_u32_axis2; +mod gather_elements_u32_axis3; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; 
+mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; 
+mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +// mod is_nan_fp16x16; +// mod is_inf_i32; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod 
resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod 
compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +mod reduce_log_sum_exp_fp32x32_export_keepdims; +mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod 
hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; 
+mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; +mod label_encoder_fp16x16_3d_default; +mod label_encoder_fp8x23_default; +mod label_encoder_i8_default; +mod label_encoder_i32_default; +mod label_encoder_u32_default; +mod reduce_sum_single_axis_fp16x16_1D; +mod reduce_sum_single_axis_fp16x16_2D_default; +mod reduce_sum_single_axis_fp16x16_2D_keepdims; +mod reduce_sum_single_axis_fp16x16_2D_axis_1; +mod reduce_sum_single_axis_fp8x23_1D; +mod reduce_sum_single_axis_fp8x23_2D_default; +mod reduce_sum_single_axis_fp8x23_2D_keepdims; +mod reduce_sum_single_axis_fp8x23_2D_axis_1; +mod reduce_sum_single_axis_i32_1D; +mod reduce_sum_single_axis_i32_2D_default; +mod reduce_sum_single_axis_i32_2D_keepdims; +mod reduce_sum_single_axis_i32_2D_axis_1; +mod reduce_sum_single_axis_i8_1D; +mod reduce_sum_single_axis_i8_2D_default; +mod reduce_sum_single_axis_i8_2D_keepdims; +mod reduce_sum_single_axis_i8_2D_axis_1; +mod reduce_sum_single_axis_u32_1D; +mod reduce_sum_single_axis_u32_2D_default; +mod reduce_sum_single_axis_u32_2D_keepdims; +mod reduce_sum_single_axis_u32_2D_axis_1; +mod reduce_sum_keep_dims; +mod reduce_sum_no_keep_dims; +mod reduce_sum_default_axes_keepdims; +mod 
reduce_sum_empty_axes_input_noop; +mod and_bool_broadcast; +mod reshape_extended_dims; +mod reshape_negative_dim; +mod reshape_negative_extended_dims; +mod reshape_one_dim; +mod reshape_reduced_dims; +mod reshape_reordered_all_dims; +mod reshape_reordered_last_dims; +mod reshape_zero_and_negative_dim; +mod reshape_zero_dim; diff --git a/tests/nodes/reshape_extended_dims.cairo b/tests/nodes/reshape_extended_dims.cairo new file mode 100644 index 000000000..80a7c5cf2 --- /dev/null +++ b/tests/nodes/reshape_extended_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_extended_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, 3, 2, 2].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_extended_dims/input_0.cairo b/tests/nodes/reshape_extended_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_extended_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + 
data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_extended_dims/output_0.cairo b/tests/nodes/reshape_extended_dims/output_0.cairo new file mode 100644 index 000000000..de2f5850b --- /dev/null +++ b/tests/nodes/reshape_extended_dims/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_dim.cairo b/tests/nodes/reshape_negative_dim.cairo new file mode 100644 index 000000000..28230dfaa --- /dev/null +++ b/tests/nodes/reshape_negative_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_negative_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, -1, 2].span()); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/reshape_negative_dim/input_0.cairo b/tests/nodes/reshape_negative_dim/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_negative_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_dim/output_0.cairo b/tests/nodes/reshape_negative_dim/output_0.cairo new file mode 100644 index 000000000..ad355bfd6 --- /dev/null +++ b/tests/nodes/reshape_negative_dim/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(6); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); 
+ data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_extended_dims.cairo b/tests/nodes/reshape_negative_extended_dims.cairo new file mode 100644 index 000000000..58e4e2440 --- /dev/null +++ b/tests/nodes/reshape_negative_extended_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_negative_extended_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![-1, 2, 3, 4].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_negative_extended_dims/input_0.cairo b/tests/nodes/reshape_negative_extended_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_negative_extended_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + 
TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_extended_dims/output_0.cairo b/tests/nodes/reshape_negative_extended_dims/output_0.cairo new file mode 100644 index 000000000..66d21516d --- /dev/null +++ b/tests/nodes/reshape_negative_extended_dims/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_one_dim.cairo b/tests/nodes/reshape_one_dim.cairo new file mode 100644 index 000000000..d1d7ec8ea --- /dev/null +++ b/tests/nodes/reshape_one_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_one_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![24].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_one_dim/input_0.cairo b/tests/nodes/reshape_one_dim/input_0.cairo new file mode 100644 
index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_one_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_one_dim/output_0.cairo b/tests/nodes/reshape_one_dim/output_0.cairo new file mode 100644 index 000000000..a6ad8efcb --- /dev/null +++ b/tests/nodes/reshape_one_dim/output_0.cairo @@ -0,0 +1,36 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(24); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/reshape_reduced_dims.cairo b/tests/nodes/reshape_reduced_dims.cairo new file mode 100644 index 000000000..4025a95a3 --- /dev/null +++ b/tests/nodes/reshape_reduced_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_reduced_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2,12].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_reduced_dims/input_0.cairo b/tests/nodes/reshape_reduced_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_reduced_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reduced_dims/output_0.cairo b/tests/nodes/reshape_reduced_dims/output_0.cairo new file mode 100644 index 000000000..3ab9777df --- /dev/null +++ 
b/tests/nodes/reshape_reduced_dims/output_0.cairo @@ -0,0 +1,37 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(12); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_all_dims.cairo b/tests/nodes/reshape_reordered_all_dims.cairo new file mode 100644 index 000000000..a31b6e23c --- /dev/null +++ b/tests/nodes/reshape_reordered_all_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_reordered_all_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![4,2,3].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_reordered_all_dims/input_0.cairo b/tests/nodes/reshape_reordered_all_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_reordered_all_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_all_dims/output_0.cairo b/tests/nodes/reshape_reordered_all_dims/output_0.cairo new file mode 100644 index 000000000..2308361dc --- /dev/null +++ b/tests/nodes/reshape_reordered_all_dims/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(4); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_last_dims.cairo b/tests/nodes/reshape_reordered_last_dims.cairo new file mode 100644 index 000000000..4a1e47e5f --- /dev/null +++ 
b/tests/nodes/reshape_reordered_last_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_reordered_last_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2,4,3].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_reordered_last_dims/input_0.cairo b/tests/nodes/reshape_reordered_last_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_reordered_last_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_last_dims/output_0.cairo b/tests/nodes/reshape_reordered_last_dims/output_0.cairo new file mode 100644 index 000000000..bb307aeb5 --- /dev/null +++ b/tests/nodes/reshape_reordered_last_dims/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_and_negative_dim.cairo b/tests/nodes/reshape_zero_and_negative_dim.cairo new file mode 100644 index 000000000..c3ab116c7 --- /dev/null +++ b/tests/nodes/reshape_zero_and_negative_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_zero_and_negative_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, 0, 1, -1].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo b/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo b/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo new file mode 100644 index 000000000..b54f6fc0c --- /dev/null +++ b/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(1); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_dim.cairo b/tests/nodes/reshape_zero_dim.cairo new file mode 100644 index 000000000..4cc3e8870 --- /dev/null +++ b/tests/nodes/reshape_zero_dim.cairo @@ -0,0 +1,20 @@ 
+mod input_0; +mod output_0; + + +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_zero_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, 0, 4, 1].span()); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_zero_dim/input_0.cairo b/tests/nodes/reshape_zero_dim/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_zero_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_dim/output_0.cairo b/tests/nodes/reshape_zero_dim/output_0.cairo new file mode 100644 index 000000000..4c6823a34 --- /dev/null +++ b/tests/nodes/reshape_zero_dim/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + 
+fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} From b988a32bf17826efb0c895ea5754dd0442ce0849 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 09:11:56 +0100 Subject: [PATCH 27/68] update doc --- docs/framework/operators/tensor/tensor.and.md | 4 ++-- docs/framework/operators/tensor/tensor.equal.md | 2 +- docs/framework/operators/tensor/tensor.is_inf.md | 4 ++-- docs/framework/operators/tensor/tensor.is_nan.md | 4 ++-- docs/framework/operators/tensor/tensor.less.md | 2 +- docs/framework/operators/tensor/tensor.not.md | 2 +- docs/framework/operators/tensor/tensor.reshape.md | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.and.md b/docs/framework/operators/tensor/tensor.and.md index d7e3ded32..58b13651b 100644 --- a/docs/framework/operators/tensor/tensor.and.md +++ b/docs/framework/operators/tensor/tensor.and.md @@ -1,7 +1,7 @@ #tensor.and ```rust - fn and(self: @Tensor, other: @Tensor) -> Tensor; + fn and(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical AND of two tensors element-wise. 
@@ -29,7 +29,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; -fn and_example() -> Tensor { +fn and_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(), ); diff --git a/docs/framework/operators/tensor/tensor.equal.md b/docs/framework/operators/tensor/tensor.equal.md index e8e91c20f..c5157498a 100644 --- a/docs/framework/operators/tensor/tensor.equal.md +++ b/docs/framework/operators/tensor/tensor.equal.md @@ -1,7 +1,7 @@ #tensor.equal ```rust - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if two tensors are equal element-wise. diff --git a/docs/framework/operators/tensor/tensor.is_inf.md b/docs/framework/operators/tensor/tensor.is_inf.md index 313b4d8b8..c0c02a548 100644 --- a/docs/framework/operators/tensor/tensor.is_inf.md +++ b/docs/framework/operators/tensor/tensor.is_inf.md @@ -1,7 +1,7 @@ ## tensor.is_inf ```rust - fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; + fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; ``` Maps infinity to true and other values to false. 
@@ -23,7 +23,7 @@ A new `Tensor` instance with entries set to true iff the input tensors cor use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, U32Tensor}; -fn is_inf_example() -> Tensor { +fn is_inf_example() -> Tensor { let tensor = TensorTrait::::new( shape: array![6].span(), data: array![1, 0, NumberTrait::INF(), 8, NumberTrait::INF(), NumberTrait::INF()].span(), ); diff --git a/docs/framework/operators/tensor/tensor.is_nan.md b/docs/framework/operators/tensor/tensor.is_nan.md index af6cfa222..88db61e3f 100644 --- a/docs/framework/operators/tensor/tensor.is_nan.md +++ b/docs/framework/operators/tensor/tensor.is_nan.md @@ -1,7 +1,7 @@ ## tensor.is_nan ```rust - fn is_nan(self: @Tensor) -> Tensor; + fn is_nan(self: @Tensor) -> Tensor; ``` Maps NaN to true and other values to false. @@ -21,7 +21,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, FP8x23Tensor}; use orion::numbers::{FixedTrait, FP8x23}; -fn is_nan_example() -> Tensor { +fn is_nan_example() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(4); diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md index 797e51f89..96586b346 100644 --- a/docs/framework/operators/tensor/tensor.less.md +++ b/docs/framework/operators/tensor/tensor.less.md @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.not.md b/docs/framework/operators/tensor/tensor.not.md index f9ee10cd9..ee482ec65 100644 --- a/docs/framework/operators/tensor/tensor.not.md +++ b/docs/framework/operators/tensor/tensor.not.md @@ -23,7 +23,7 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; -fn not_example() -> Tensor { +fn not_example() -> Tensor { let tensor = TensorTrait::new( shape: array![3].span(), data: array![ diff --git a/docs/framework/operators/tensor/tensor.reshape.md b/docs/framework/operators/tensor/tensor.reshape.md index b2c8f84eb..399e5fc85 100644 --- a/docs/framework/operators/tensor/tensor.reshape.md +++ b/docs/framework/operators/tensor/tensor.reshape.md @@ -1,7 +1,7 @@ # tensor.reshape ```rust - fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + fn reshape(self: @Tensor, target_shape: Span) -> Tensor; ``` Returns a new tensor with the specified target shape and the same data as the input tensor. @@ -9,7 +9,7 @@ Returns a new tensor with the specified target shape and the same data as the in ## Args * `self`(`@Tensor`) - The input tensor. -* `target_shape`(Span) - A span containing the target shape of the tensor. +* `target_shape`(Span) - A span containing the target shape of the tensor. 
## Panics From 0eea108cfdf76155d9476ac712f906364289802e Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 09:45:01 +0100 Subject: [PATCH 28/68] replace type of conditional operators --- src/operators/tensor/core.cairo | 76 +++++++++---------- .../tensor/implementations/tensor_bool.cairo | 16 ++-- .../implementations/tensor_complex64.cairo | 16 ++-- .../implementations/tensor_fp16x16.cairo | 16 ++-- .../implementations/tensor_fp16x16wide.cairo | 16 ++-- .../implementations/tensor_fp32x32.cairo | 16 ++-- .../implementations/tensor_fp64x64.cairo | 16 ++-- .../implementations/tensor_fp8x23.cairo | 16 ++-- .../implementations/tensor_fp8x23wide.cairo | 16 ++-- .../tensor/implementations/tensor_i32.cairo | 16 ++-- .../tensor/implementations/tensor_i8.cairo | 16 ++-- .../tensor/implementations/tensor_u32.cairo | 16 ++-- src/operators/tensor/math/and.cairo | 6 +- src/operators/tensor/math/equal.cairo | 7 +- src/operators/tensor/math/greater.cairo | 7 +- src/operators/tensor/math/greater_equal.cairo | 7 +- src/operators/tensor/math/less.cairo | 7 +- src/operators/tensor/math/less_equal.cairo | 7 +- src/operators/tensor/math/or.cairo | 7 +- src/operators/tensor/math/xor.cairo | 7 +- tests/lib.cairo | 12 +-- 21 files changed, 156 insertions(+), 163 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 45c5782e4..5a673c3f9 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1085,7 +1085,7 @@ trait TensorTrait { /// #tensor.equal /// /// ```rust - /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if two tensors are equal element-wise. @@ -1104,7 +1104,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. + /// A new `Tensor` (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -1115,7 +1115,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn eq_example() -> Tensor { + /// fn eq_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1137,7 +1137,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn eq_example() -> Tensor { + /// fn eq_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1150,11 +1150,11 @@ trait TensorTrait { /// >>> [true,true,true,false,false,false,false,false,false] /// ``` /// - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.greater /// /// ```rust - /// fn greater(self: @Tensor, other: @Tensor) -> Tensor; + /// fn greater(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is greater than the corresponding element of the second tensor. @@ -1173,7 +1173,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -1184,7 +1184,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_example() -> Tensor { + /// fn greater_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1206,7 +1206,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_example() -> Tensor { + /// fn greater_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1219,11 +1219,11 @@ trait TensorTrait { /// >>> [0,0,0,1,1,1,1,1,1] /// ``` /// - fn greater(self: @Tensor, other: @Tensor) -> Tensor; + fn greater(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.greater_equal /// /// ```rust - /// fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is greater than or equal to the corresponding element of the second tensor. 
@@ -1253,7 +1253,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_equal_example() -> Tensor { + /// fn greater_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1275,7 +1275,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_equal_example() -> Tensor { + /// fn greater_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1288,11 +1288,11 @@ trait TensorTrait { /// >>> [1,1,1,1,1,1,0,0,0] /// ``` /// - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less /// /// ```rust - /// fn less(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than the corresponding element of the second tensor. 
@@ -1322,7 +1322,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1344,7 +1344,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1357,11 +1357,11 @@ trait TensorTrait { /// >>> [false,false,false,false,false,false,false,true,true] /// ``` /// - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less_equal /// /// ```rust - /// fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor. 
@@ -1391,7 +1391,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_equal_example() -> Tensor { + /// fn less_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1413,7 +1413,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_equal_example() -> Tensor { + /// fn less_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1426,7 +1426,7 @@ trait TensorTrait { /// >>> [1,1,1,0,0,0,1,1,1] /// ``` /// - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.abs /// /// ```rust @@ -2158,7 +2158,7 @@ trait TensorTrait { /// #tensor.or /// /// ```rust - /// fn or(self: @Tensor, other: @Tensor) -> Tensor; + /// fn or(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical OR of two tensors element-wise. @@ -2177,7 +2177,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -2188,7 +2188,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn or_example() -> Tensor { + /// fn or_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2209,7 +2209,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn or_example() -> Tensor { + /// fn or_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2223,11 +2223,11 @@ trait TensorTrait { /// >>> [0,1,1,1,1,1,1,1,1] /// ``` /// - fn or(self: @Tensor, other: @Tensor) -> Tensor; + fn or(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.xor /// /// ```rust - /// fn xor(self: @Tensor, other: @Tensor) -> Tensor; + /// fn xor(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical XOR of two tensors element-wise. @@ -2246,7 +2246,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -2257,7 +2257,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn xor_example() -> Tensor { + /// fn xor_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2278,7 +2278,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn xor_example() -> Tensor { + /// fn xor_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2292,7 +2292,7 @@ trait TensorTrait { /// >>> [0,0,0,1,0,0,1,0,0] /// ``` /// - fn xor(self: @Tensor, other: @Tensor) -> Tensor; + fn xor(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.acos /// /// ```rust @@ -3465,7 +3465,7 @@ trait TensorTrait { /// #tensor.and /// /// ```rust - /// fn and(self: @Tensor, other: @Tensor) -> Tensor; + /// fn and(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical AND of two tensors element-wise. @@ -3484,7 +3484,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` with the same shape as the broadcasted inputs. + /// A new `Tensor` with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -3493,7 +3493,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; /// - /// fn and_example() -> Tensor { + /// fn and_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(), /// ); @@ -3507,7 +3507,7 @@ trait TensorTrait { /// >>> [false, false, false, false, false, true, false, false, false, false, false, true] /// ``` /// - fn and(self: @Tensor, other: @Tensor) -> Tensor; + fn and(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.where /// /// ```rust @@ -4755,7 +4755,7 @@ trait TensorTrait { /// #tensor.not /// /// ```rust - /// fn not(self: @Tensor) -> Tensor) -> Tensor; /// ``` /// /// Computes the negation of the elements in the bool type input tensor. @@ -4777,7 +4777,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; /// - /// fn not_example() -> Tensor { + /// fn not_example() -> Tensor { /// let tensor = TensorTrait::new( /// shape: array![3].span(), /// data: array![ diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index ad091178f..8d7f6706c 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -109,23 +109,23 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less(self: @Tensor, other: 
@Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -187,11 +187,11 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -259,7 +259,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 6f369ea32..f6f2b4f15 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -128,23 +128,23 @@ impl Complex64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -206,11 +206,11 @@ impl 
Complex64Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -347,7 +347,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 321bca7c5..1671b07bd 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -122,23 +122,23 @@ impl FP16x16Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -200,11 +200,11 @@ impl FP16x16Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: 
@Tensor) -> Tensor { math::or::or(self, other) } @@ -385,7 +385,7 @@ impl FP16x16Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 280967a07..b542b625f 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -132,23 +132,23 @@ impl FP16x16WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -210,11 +210,11 @@ impl FP16x16WTensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -351,7 +351,7 @@ impl FP16x16WTensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, 
other) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 007909cd8..1ba415db9 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -120,23 +120,23 @@ impl FP32x32Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -198,11 +198,11 @@ impl FP32x32Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -383,7 +383,7 @@ impl FP32x32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index fdeab79d7..1f276a37d 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ 
b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -119,23 +119,23 @@ impl FP64x64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -197,11 +197,11 @@ impl FP64x64Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -382,7 +382,7 @@ impl FP64x64Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index aa1399dbb..f27237169 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -121,23 +121,23 @@ impl FP8x23Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn 
greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -199,11 +199,11 @@ impl FP8x23Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -384,7 +384,7 @@ impl FP8x23Tensor of TensorTrait { core_ops::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 6fd728c65..ba6cc8ce7 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -124,23 +124,23 @@ impl FP8x23WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { 
math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -202,11 +202,11 @@ impl FP8x23WTensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -338,7 +338,7 @@ impl FP8x23WTensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index e35c99270..eb97af01b 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -117,23 +117,23 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) 
-> Tensor { math::less_equal::less_equal(self, other) } @@ -195,11 +195,11 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -372,7 +372,7 @@ impl I32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index d428ef200..f1b23b57b 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -115,23 +115,23 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -193,11 +193,11 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, 
other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -376,7 +376,7 @@ impl I8Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index fff763e3b..09900b955 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -114,23 +114,23 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -192,11 +192,11 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -320,7 +320,7 @@ impl U32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> 
Tensor { math::and::and(self, other) } diff --git a/src/operators/tensor/math/and.cairo b/src/operators/tensor/math/and.cairo index c55efce4f..95f7c9bea 100644 --- a/src/operators/tensor/math/and.cairo +++ b/src/operators/tensor/math/and.cairo @@ -1,13 +1,13 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor, U32Tensor}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; /// Cf: TensorTrait::and docstring -fn and(y: @Tensor, z: @Tensor) -> Tensor { +fn and(y: @Tensor, z: @Tensor) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/equal.cairo b/src/operators/tensor/math/equal.cairo index 6ee8e83bd..96cf68329 100644 --- a/src/operators/tensor/math/equal.cairo +++ b/src/operators/tensor/math/equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, U32Tensor}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,15 +6,14 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::equal docstring fn equal< T, - impl BoolTensor: TensorTrait, impl TPartialEq: PartialEq, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/greater.cairo b/src/operators/tensor/math/greater.cairo 
index f90462b22..c6ff275af 100644 --- a/src/operators/tensor/math/greater.cairo +++ b/src/operators/tensor/math/greater.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,15 +6,14 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::greater docstring fn greater< T, - impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/greater_equal.cairo b/src/operators/tensor/math/greater_equal.cairo index bc8e1b045..efd7fb8a9 100644 --- a/src/operators/tensor/math/greater_equal.cairo +++ b/src/operators/tensor/math/greater_equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,15 +6,14 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::greater_equal docstring fn greater_equal< T, - impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo index 
4afddc3fb..e590404f6 100644 --- a/src/operators/tensor/math/less.cairo +++ b/src/operators/tensor/math/less.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,15 +6,14 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less docstring fn less< T, - impl U32Tensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/less_equal.cairo b/src/operators/tensor/math/less_equal.cairo index 8c982a09c..dea786878 100644 --- a/src/operators/tensor/math/less_equal.cairo +++ b/src/operators/tensor/math/less_equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,15 +6,14 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less_equal docstring fn less_equal< T, - impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/or.cairo b/src/operators/tensor/math/or.cairo index 13b4697a3..0b93e0400 100644 --- 
a/src/operators/tensor/math/or.cairo +++ b/src/operators/tensor/math/or.cairo @@ -1,5 +1,5 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -9,14 +9,13 @@ fn or< T, MAG, impl TNumber: NumberTrait, - impl UsizeFTensor: TensorTrait, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/xor.cairo b/src/operators/tensor/math/xor.cairo index 7ed06eba5..b10291b85 100644 --- a/src/operators/tensor/math/xor.cairo +++ b/src/operators/tensor/math/xor.cairo @@ -1,5 +1,5 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -9,14 +9,13 @@ fn xor< T, MAG, impl TNumber: NumberTrait, - impl UsizeFTensor: TensorTrait, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..eb58139db 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -mod numbers; -mod performance; -mod tensor_core; -mod nodes; -mod ml; -mod operators; +// mod numbers; +// mod performance; +// mod tensor_core; +// mod nodes; 
+// mod ml; +// mod operators; From 2caf078b1d68fb8f036add811ae47c3e6e88c4a9 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 10:05:10 +0100 Subject: [PATCH 29/68] update tests --- nodegen/node/and.py | 4 +-- nodegen/node/equal.py | 20 +++++------ nodegen/node/greater.py | 20 +++++------ nodegen/node/greater_equal.py | 20 +++++------ nodegen/node/less.py | 20 +++++------ nodegen/node/less_equal.py | 20 +++++------ nodegen/node/or.py | 20 +++++------ nodegen/node/xor.py | 20 +++++------ tests/lib.cairo | 12 +++---- tests/nodes/and_bool.cairo | 8 ++--- tests/nodes/and_bool/input_0.cairo | 6 ++-- tests/nodes/and_bool/input_1.cairo | 8 ++--- tests/nodes/and_bool/output_0.cairo | 8 ++--- tests/nodes/and_bool_broadcast.cairo | 8 ++--- tests/nodes/and_bool_broadcast/input_0.cairo | 28 +++++++-------- tests/nodes/and_bool_broadcast/input_1.cairo | 30 ++++++++-------- tests/nodes/and_bool_broadcast/output_0.cairo | 28 +++++++-------- tests/nodes/equal_fp16x16.cairo | 10 +++--- tests/nodes/equal_fp16x16/input_0.cairo | 28 +++++++-------- tests/nodes/equal_fp16x16/input_1.cairo | 24 ++++++------- tests/nodes/equal_fp16x16/output_0.cairo | 12 +++---- tests/nodes/equal_fp16x16_broadcast.cairo | 10 +++--- .../equal_fp16x16_broadcast/input_0.cairo | 8 ++--- .../equal_fp16x16_broadcast/input_1.cairo | 4 +-- .../equal_fp16x16_broadcast/output_0.cairo | 4 +-- tests/nodes/equal_fp8x23.cairo | 8 ++--- tests/nodes/equal_fp8x23/input_0.cairo | 28 +++++++-------- tests/nodes/equal_fp8x23/input_1.cairo | 28 +++++++-------- tests/nodes/equal_fp8x23/output_0.cairo | 12 +++---- tests/nodes/equal_fp8x23_broadcast.cairo | 8 ++--- .../equal_fp8x23_broadcast/input_0.cairo | 6 ++-- .../equal_fp8x23_broadcast/input_1.cairo | 2 +- .../equal_fp8x23_broadcast/output_0.cairo | 6 ++-- tests/nodes/equal_i32.cairo | 6 ++-- tests/nodes/equal_i32/input_0.cairo | 22 ++++++------ tests/nodes/equal_i32/input_1.cairo | 26 +++++++------- tests/nodes/equal_i32/output_0.cairo | 10 +++--- 
tests/nodes/equal_i32_broadcast.cairo | 6 ++-- tests/nodes/equal_i32_broadcast/input_0.cairo | 6 ++-- tests/nodes/equal_i32_broadcast/input_1.cairo | 2 +- .../nodes/equal_i32_broadcast/output_0.cairo | 6 ++-- tests/nodes/equal_i8.cairo | 8 ++--- tests/nodes/equal_i8/input_0.cairo | 32 ++++++++--------- tests/nodes/equal_i8/input_1.cairo | 26 +++++++------- tests/nodes/equal_i8/output_0.cairo | 10 +++--- tests/nodes/equal_i8_broadcast.cairo | 8 ++--- tests/nodes/equal_i8_broadcast/input_0.cairo | 6 ++-- tests/nodes/equal_i8_broadcast/input_1.cairo | 2 +- tests/nodes/equal_i8_broadcast/output_0.cairo | 6 ++-- tests/nodes/equal_u32.cairo | 8 +++-- tests/nodes/equal_u32/input_0.cairo | 26 +++++++------- tests/nodes/equal_u32/input_1.cairo | 22 ++++++------ tests/nodes/equal_u32/output_0.cairo | 14 ++++---- tests/nodes/equal_u32_broadcast.cairo | 8 +++-- tests/nodes/equal_u32_broadcast/input_0.cairo | 6 ++-- .../nodes/equal_u32_broadcast/output_0.cairo | 4 +-- tests/nodes/greater_equal_fp16x16.cairo | 16 ++++----- .../nodes/greater_equal_fp16x16/input_0.cairo | 28 +++++++-------- .../nodes/greater_equal_fp16x16/input_1.cairo | 28 +++++++-------- .../greater_equal_fp16x16/output_0.cairo | 17 ++++----- .../greater_equal_fp16x16_broadcast.cairo | 16 ++++----- .../input_0.cairo | 32 ++++++++--------- .../input_1.cairo | 6 ++-- .../output_0.cairo | 23 ++++++------ tests/nodes/greater_equal_fp8x23.cairo | 14 ++++---- .../nodes/greater_equal_fp8x23/input_0.cairo | 30 ++++++++-------- .../nodes/greater_equal_fp8x23/input_1.cairo | 24 ++++++------- .../nodes/greater_equal_fp8x23/output_0.cairo | 15 ++++---- .../greater_equal_fp8x23_broadcast.cairo | 14 ++++---- .../input_0.cairo | 22 ++++++------ .../input_1.cairo | 6 ++-- .../output_0.cairo | 15 ++++---- tests/nodes/greater_equal_i32.cairo | 14 ++++---- tests/nodes/greater_equal_i32/input_0.cairo | 31 ++++++++-------- tests/nodes/greater_equal_i32/input_1.cairo | 31 ++++++++-------- tests/nodes/greater_equal_i32/output_0.cairo 
| 19 +++++----- tests/nodes/greater_equal_i32_broadcast.cairo | 14 ++++---- .../greater_equal_i32_broadcast/input_0.cairo | 31 ++++++++-------- .../greater_equal_i32_broadcast/input_1.cairo | 9 ++--- .../output_0.cairo | 21 +++++------ tests/nodes/greater_equal_i8.cairo | 16 ++++----- tests/nodes/greater_equal_i8/input_0.cairo | 27 +++++++------- tests/nodes/greater_equal_i8/input_1.cairo | 27 +++++++------- tests/nodes/greater_equal_i8/output_0.cairo | 19 +++++----- tests/nodes/greater_equal_i8_broadcast.cairo | 16 ++++----- .../greater_equal_i8_broadcast/input_0.cairo | 27 +++++++------- .../greater_equal_i8_broadcast/input_1.cairo | 7 ++-- .../greater_equal_i8_broadcast/output_0.cairo | 23 ++++++------ tests/nodes/greater_equal_u32.cairo | 14 ++++---- tests/nodes/greater_equal_u32/input_0.cairo | 29 +++++++-------- tests/nodes/greater_equal_u32/input_1.cairo | 35 ++++++++++--------- tests/nodes/greater_equal_u32/output_0.cairo | 19 +++++----- tests/nodes/greater_equal_u32_broadcast.cairo | 14 ++++---- .../greater_equal_u32_broadcast/input_0.cairo | 27 +++++++------- .../greater_equal_u32_broadcast/input_1.cairo | 7 ++-- .../output_0.cairo | 15 ++++---- tests/nodes/greater_fp16x16.cairo | 16 ++++----- tests/nodes/greater_fp16x16/input_0.cairo | 26 +++++++------- tests/nodes/greater_fp16x16/input_1.cairo | 30 ++++++++-------- tests/nodes/greater_fp16x16/output_0.cairo | 13 +++---- tests/nodes/greater_fp16x16_broadcast.cairo | 16 ++++----- .../greater_fp16x16_broadcast/input_0.cairo | 8 ++--- .../greater_fp16x16_broadcast/input_1.cairo | 6 ++-- .../greater_fp16x16_broadcast/output_0.cairo | 9 ++--- tests/nodes/greater_fp8x23.cairo | 14 ++++---- tests/nodes/greater_fp8x23/input_0.cairo | 34 +++++++++--------- tests/nodes/greater_fp8x23/input_1.cairo | 32 ++++++++--------- tests/nodes/greater_fp8x23/output_0.cairo | 19 +++++----- tests/nodes/greater_fp8x23_broadcast.cairo | 14 ++++---- .../greater_fp8x23_broadcast/input_0.cairo | 6 ++-- 
.../greater_fp8x23_broadcast/input_1.cairo | 6 ++-- .../greater_fp8x23_broadcast/output_0.cairo | 7 ++-- tests/nodes/greater_i32.cairo | 14 ++++---- tests/nodes/greater_i32/input_0.cairo | 31 ++++++++-------- tests/nodes/greater_i32/input_1.cairo | 35 ++++++++++--------- tests/nodes/greater_i32/output_0.cairo | 19 +++++----- tests/nodes/greater_i32_broadcast.cairo | 14 ++++---- .../nodes/greater_i32_broadcast/input_0.cairo | 9 ++--- .../nodes/greater_i32_broadcast/input_1.cairo | 7 ++-- .../greater_i32_broadcast/output_0.cairo | 7 ++-- tests/nodes/greater_i8.cairo | 16 ++++----- tests/nodes/greater_i8/input_0.cairo | 31 ++++++++-------- tests/nodes/greater_i8/input_1.cairo | 33 ++++++++--------- tests/nodes/greater_i8/output_0.cairo | 21 +++++------ tests/nodes/greater_i8_broadcast.cairo | 16 ++++----- .../nodes/greater_i8_broadcast/input_0.cairo | 7 ++-- .../nodes/greater_i8_broadcast/input_1.cairo | 7 ++-- .../nodes/greater_i8_broadcast/output_0.cairo | 7 ++-- tests/nodes/greater_u32.cairo | 14 ++++---- tests/nodes/greater_u32/input_0.cairo | 33 ++++++++--------- tests/nodes/greater_u32/input_1.cairo | 33 ++++++++--------- tests/nodes/greater_u32/output_0.cairo | 19 +++++----- tests/nodes/greater_u32_broadcast.cairo | 14 ++++---- .../nodes/greater_u32_broadcast/input_0.cairo | 9 ++--- .../nodes/greater_u32_broadcast/input_1.cairo | 7 ++-- .../greater_u32_broadcast/output_0.cairo | 9 ++--- tests/nodes/less_equal_fp16x16.cairo | 16 ++++----- tests/nodes/less_equal_fp16x16/input_0.cairo | 6 ++-- tests/nodes/less_equal_fp16x16/input_1.cairo | 8 ++--- tests/nodes/less_equal_fp16x16/output_0.cairo | 7 ++-- .../nodes/less_equal_fp16x16_broadcast.cairo | 16 ++++----- .../input_0.cairo | 6 ++-- .../input_1.cairo | 6 ++-- .../output_0.cairo | 7 ++-- tests/nodes/less_equal_fp8x23.cairo | 16 ++++----- tests/nodes/less_equal_fp8x23/input_0.cairo | 4 +-- tests/nodes/less_equal_fp8x23/input_1.cairo | 8 ++--- tests/nodes/less_equal_fp8x23/output_0.cairo | 7 ++-- 
tests/nodes/less_equal_fp8x23_broadcast.cairo | 16 ++++----- .../less_equal_fp8x23_broadcast/input_0.cairo | 6 ++-- .../less_equal_fp8x23_broadcast/input_1.cairo | 4 +-- .../output_0.cairo | 7 ++-- tests/nodes/less_equal_i32.cairo | 12 +++---- tests/nodes/less_equal_i32/input_0.cairo | 7 ++-- tests/nodes/less_equal_i32/input_1.cairo | 7 ++-- tests/nodes/less_equal_i32/output_0.cairo | 7 ++-- tests/nodes/less_equal_i32_broadcast.cairo | 12 +++---- .../less_equal_i32_broadcast/input_0.cairo | 7 ++-- .../less_equal_i32_broadcast/input_1.cairo | 7 ++-- .../less_equal_i32_broadcast/output_0.cairo | 7 ++-- tests/nodes/less_equal_i8.cairo | 16 ++++----- tests/nodes/less_equal_i8/input_0.cairo | 7 ++-- tests/nodes/less_equal_i8/input_1.cairo | 7 ++-- tests/nodes/less_equal_i8/output_0.cairo | 7 ++-- tests/nodes/less_equal_i8_broadcast.cairo | 16 ++++----- .../less_equal_i8_broadcast/input_0.cairo | 9 ++--- .../less_equal_i8_broadcast/input_1.cairo | 7 ++-- .../less_equal_i8_broadcast/output_0.cairo | 11 +++--- tests/nodes/less_equal_u32.cairo | 14 ++++---- tests/nodes/less_equal_u32/input_0.cairo | 7 ++-- tests/nodes/less_equal_u32/input_1.cairo | 7 ++-- tests/nodes/less_equal_u32/output_0.cairo | 7 ++-- tests/nodes/less_equal_u32_broadcast.cairo | 14 ++++---- .../less_equal_u32_broadcast/input_0.cairo | 9 ++--- .../less_equal_u32_broadcast/input_1.cairo | 5 +-- .../less_equal_u32_broadcast/output_0.cairo | 9 ++--- tests/nodes/less_fp16x16.cairo | 10 +++--- tests/nodes/less_fp16x16/input_0.cairo | 26 +++++++------- tests/nodes/less_fp16x16/input_1.cairo | 30 ++++++++-------- tests/nodes/less_fp16x16/output_0.cairo | 16 ++++----- tests/nodes/less_fp16x16_broadcast.cairo | 10 +++--- .../less_fp16x16_broadcast/input_0.cairo | 28 +++++++-------- .../less_fp16x16_broadcast/input_1.cairo | 4 +-- .../less_fp16x16_broadcast/output_0.cairo | 18 +++++----- tests/nodes/less_fp8x23.cairo | 8 ++--- tests/nodes/less_fp8x23/input_0.cairo | 24 ++++++------- 
tests/nodes/less_fp8x23/input_1.cairo | 26 +++++++------- tests/nodes/less_fp8x23/output_0.cairo | 18 +++++----- tests/nodes/less_fp8x23_broadcast.cairo | 8 ++--- .../nodes/less_fp8x23_broadcast/input_0.cairo | 30 ++++++++-------- .../nodes/less_fp8x23_broadcast/input_1.cairo | 4 +-- .../less_fp8x23_broadcast/output_0.cairo | 20 +++++------ tests/nodes/less_i32.cairo | 6 ++-- tests/nodes/less_i32/input_0.cairo | 30 ++++++++-------- tests/nodes/less_i32/input_1.cairo | 26 +++++++------- tests/nodes/less_i32/output_0.cairo | 16 ++++----- tests/nodes/less_i32_broadcast.cairo | 6 ++-- tests/nodes/less_i32_broadcast/input_0.cairo | 26 +++++++------- tests/nodes/less_i32_broadcast/input_1.cairo | 4 +-- tests/nodes/less_i32_broadcast/output_0.cairo | 20 +++++------ tests/nodes/less_i8.cairo | 10 +++--- tests/nodes/less_i8/input_0.cairo | 30 ++++++++-------- tests/nodes/less_i8/input_1.cairo | 20 +++++------ tests/nodes/less_i8/output_0.cairo | 16 ++++----- tests/nodes/less_i8_broadcast.cairo | 10 +++--- tests/nodes/less_i8_broadcast/input_0.cairo | 34 +++++++++--------- tests/nodes/less_i8_broadcast/input_1.cairo | 4 +-- tests/nodes/less_i8_broadcast/output_0.cairo | 18 +++++----- tests/nodes/less_u32.cairo | 6 ++-- tests/nodes/less_u32/input_0.cairo | 32 ++++++++--------- tests/nodes/less_u32/input_1.cairo | 28 +++++++-------- tests/nodes/less_u32/output_0.cairo | 16 ++++----- tests/nodes/less_u32_broadcast.cairo | 6 ++-- tests/nodes/less_u32_broadcast/input_0.cairo | 26 +++++++------- tests/nodes/less_u32_broadcast/input_1.cairo | 2 +- tests/nodes/less_u32_broadcast/output_0.cairo | 22 ++++++------ tests/nodes/or_fp16x16.cairo | 14 ++++---- tests/nodes/or_fp16x16/input_0.cairo | 26 +++++++------- tests/nodes/or_fp16x16/input_1.cairo | 32 ++++++++--------- tests/nodes/or_fp16x16/output_0.cairo | 7 ++-- tests/nodes/or_fp16x16_broadcast.cairo | 14 ++++---- .../nodes/or_fp16x16_broadcast/input_0.cairo | 6 ++-- .../nodes/or_fp16x16_broadcast/input_1.cairo | 4 +-- 
.../nodes/or_fp16x16_broadcast/output_0.cairo | 5 +-- tests/nodes/or_fp8x23.cairo | 16 ++++----- tests/nodes/or_fp8x23/input_0.cairo | 26 +++++++------- tests/nodes/or_fp8x23/input_1.cairo | 28 +++++++-------- tests/nodes/or_fp8x23/output_0.cairo | 7 ++-- tests/nodes/or_fp8x23_broadcast.cairo | 16 ++++----- tests/nodes/or_fp8x23_broadcast/input_0.cairo | 8 ++--- tests/nodes/or_fp8x23_broadcast/input_1.cairo | 6 ++-- .../nodes/or_fp8x23_broadcast/output_0.cairo | 5 +-- tests/nodes/or_i32.cairo | 12 +++---- tests/nodes/or_i32/input_0.cairo | 27 +++++++------- tests/nodes/or_i32/input_1.cairo | 31 ++++++++-------- tests/nodes/or_i32/output_0.cairo | 7 ++-- tests/nodes/or_i32_broadcast.cairo | 12 +++---- tests/nodes/or_i32_broadcast/input_0.cairo | 7 ++-- tests/nodes/or_i32_broadcast/input_1.cairo | 7 ++-- tests/nodes/or_i32_broadcast/output_0.cairo | 5 +-- tests/nodes/or_i8.cairo | 14 ++++---- tests/nodes/or_i8/input_0.cairo | 33 ++++++++--------- tests/nodes/or_i8/input_1.cairo | 29 +++++++-------- tests/nodes/or_i8/output_0.cairo | 7 ++-- tests/nodes/or_i8_broadcast.cairo | 14 ++++---- tests/nodes/or_i8_broadcast/input_0.cairo | 9 ++--- tests/nodes/or_i8_broadcast/input_1.cairo | 5 +-- tests/nodes/or_i8_broadcast/output_0.cairo | 5 +-- tests/nodes/or_u32.cairo | 14 ++++---- tests/nodes/or_u32/input_0.cairo | 23 ++++++------ tests/nodes/or_u32/input_1.cairo | 29 +++++++-------- tests/nodes/or_u32/output_0.cairo | 9 ++--- tests/nodes/or_u32_broadcast.cairo | 14 ++++---- tests/nodes/or_u32_broadcast/input_0.cairo | 9 ++--- tests/nodes/or_u32_broadcast/input_1.cairo | 7 ++-- tests/nodes/or_u32_broadcast/output_0.cairo | 5 +-- tests/nodes/xor_fp16x16.cairo | 16 ++++----- tests/nodes/xor_fp16x16/input_0.cairo | 8 ++--- tests/nodes/xor_fp16x16/input_1.cairo | 6 ++-- tests/nodes/xor_fp16x16/output_0.cairo | 9 ++--- tests/nodes/xor_fp16x16_broadcast.cairo | 16 ++++----- .../nodes/xor_fp16x16_broadcast/input_0.cairo | 6 ++-- .../nodes/xor_fp16x16_broadcast/input_1.cairo | 4 
+-- .../xor_fp16x16_broadcast/output_0.cairo | 11 +++--- tests/nodes/xor_fp8x23.cairo | 14 ++++---- tests/nodes/xor_fp8x23/input_0.cairo | 34 +++++++++--------- tests/nodes/xor_fp8x23/input_1.cairo | 26 +++++++------- tests/nodes/xor_fp8x23/output_0.cairo | 11 +++--- tests/nodes/xor_fp8x23_broadcast.cairo | 14 ++++---- .../nodes/xor_fp8x23_broadcast/input_0.cairo | 6 ++-- .../nodes/xor_fp8x23_broadcast/input_1.cairo | 6 ++-- .../nodes/xor_fp8x23_broadcast/output_0.cairo | 9 ++--- tests/nodes/xor_i32.cairo | 14 ++++---- tests/nodes/xor_i32/input_0.cairo | 31 ++++++++-------- tests/nodes/xor_i32/input_1.cairo | 31 ++++++++-------- tests/nodes/xor_i32/output_0.cairo | 11 +++--- tests/nodes/xor_i32_broadcast.cairo | 14 ++++---- tests/nodes/xor_i32_broadcast/input_0.cairo | 9 ++--- tests/nodes/xor_i32_broadcast/input_1.cairo | 7 ++-- tests/nodes/xor_i32_broadcast/output_0.cairo | 9 ++--- tests/nodes/xor_i8.cairo | 16 ++++----- tests/nodes/xor_i8/input_0.cairo | 27 +++++++------- tests/nodes/xor_i8/input_1.cairo | 33 ++++++++--------- tests/nodes/xor_i8/output_0.cairo | 15 ++++---- tests/nodes/xor_i8_broadcast.cairo | 16 ++++----- tests/nodes/xor_i8_broadcast/input_0.cairo | 9 ++--- tests/nodes/xor_i8_broadcast/input_1.cairo | 7 ++-- tests/nodes/xor_i8_broadcast/output_0.cairo | 9 ++--- tests/nodes/xor_u32.cairo | 14 ++++---- tests/nodes/xor_u32/input_0.cairo | 33 ++++++++--------- tests/nodes/xor_u32/input_1.cairo | 31 ++++++++-------- tests/nodes/xor_u32/output_0.cairo | 13 +++---- tests/nodes/xor_u32_broadcast.cairo | 14 ++++---- tests/nodes/xor_u32_broadcast/input_0.cairo | 9 ++--- tests/nodes/xor_u32_broadcast/input_1.cairo | 7 ++-- tests/nodes/xor_u32_broadcast/output_0.cairo | 7 ++-- 296 files changed, 2256 insertions(+), 2146 deletions(-) diff --git a/nodegen/node/and.py b/nodegen/node/and.py index f8885702f..8d344398a 100644 --- a/nodegen/node/and.py +++ b/nodegen/node/and.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.BOOL, x.shape, x.flatten()) y = 
Tensor(Dtype.BOOL, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "and_bool" make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.BOOL, x.shape, x.flatten()) y = Tensor(Dtype.BOOL, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "and_bool_broadcast" make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name) diff --git a/nodegen/node/equal.py b/nodegen/node/equal.py index f995ae999..162df0840 100644 --- a/nodegen/node/equal.py +++ b/nodegen/node/equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_u32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_u32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_i32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_i32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, 
z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_i8" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_i8_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_fp8x23" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_fp8x23_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_fp16x16" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "equal_fp16x16_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) diff --git a/nodegen/node/greater.py b/nodegen/node/greater.py index 2fd793847..dc95017b7 100644 --- a/nodegen/node/greater.py +++ b/nodegen/node/greater.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, 
z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_u32" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_u32_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_i32" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_i32_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_i8" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_i8_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_fp8x23" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), 
FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_fp8x23_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_fp16x16" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_fp16x16_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) diff --git a/nodegen/node/greater_equal.py b/nodegen/node/greater_equal.py index 2d43f7cc2..a4c54b672 100644 --- a/nodegen/node/greater_equal.py +++ b/nodegen/node/greater_equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_u32" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_u32_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_i32" make_test([x, y], z, 
"input_0.greater_equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_i32_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_i8" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_i8_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_fp8x23" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_fp8x23_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_fp16x16" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): 
x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "greater_equal_fp16x16_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) diff --git a/nodegen/node/less.py b/nodegen/node/less.py index 20b39263d..452ea2732 100644 --- a/nodegen/node/less.py +++ b/nodegen/node/less.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_u32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_u32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i8" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, 
y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i8_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp8x23" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp8x23_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp16x16" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp16x16_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) diff --git a/nodegen/node/less_equal.py b/nodegen/node/less_equal.py index c54040331..2a29d0816 100644 --- a/nodegen/node/less_equal.py +++ b/nodegen/node/less_equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_u32" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = 
Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_u32_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i32" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i32_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i8" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i8_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp8x23" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = 
"less_equal_fp8x23_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp16x16" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp16x16_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) diff --git a/nodegen/node/or.py b/nodegen/node/or.py index a39d2adb3..630795182 100644 --- a/nodegen/node/or.py +++ b/nodegen/node/or.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_u32" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_u32_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_i32" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_i32_broadcast" make_test([x, y], z, 
"input_0.or(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_i8" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_i8_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_fp8x23" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_fp8x23_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_fp16x16" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "or_fp16x16_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) diff --git a/nodegen/node/xor.py b/nodegen/node/xor.py index bd8c025c6..e9e9f37db 100644 --- a/nodegen/node/xor.py +++ b/nodegen/node/xor.py @@ -14,7 +14,7 @@ def 
default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_u32" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -26,7 +26,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_u32_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -43,7 +43,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_i32" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -55,7 +55,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_i32_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -72,7 +72,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_i8" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -84,7 +84,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_i8_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -103,7 +103,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_fp8x23" make_test([x, y], z, "input_0.xor(@input_1)", 
name) @@ -117,7 +117,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_fp8x23_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -136,7 +136,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_fp16x16" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -150,7 +150,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "xor_fp16x16_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) diff --git a/tests/lib.cairo b/tests/lib.cairo index eb58139db..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -// mod numbers; -// mod performance; -// mod tensor_core; -// mod nodes; -// mod ml; -// mod operators; +mod numbers; +mod performance; +mod tensor_core; +mod nodes; +mod ml; +mod operators; diff --git a/tests/nodes/and_bool.cairo b/tests/nodes/and_bool.cairo index d89a0a213..c9af2ad8c 100644 --- a/tests/nodes/and_bool.cairo +++ b/tests/nodes/and_bool.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/and_bool/input_0.cairo b/tests/nodes/and_bool/input_0.cairo index 76cf09e07..4789edaac 100644 --- a/tests/nodes/and_bool/input_0.cairo +++ b/tests/nodes/and_bool/input_0.cairo @@ -8,15 +8,15 @@ fn input_0() -> Tensor { shape.append(4); let mut data = ArrayTrait::new(); - data.append(false); - data.append(true); data.append(true); data.append(false); data.append(false); - data.append(true); data.append(false); data.append(true); data.append(false); + data.append(true); + data.append(true); + data.append(true); data.append(false); data.append(false); data.append(false); diff --git a/tests/nodes/and_bool/input_1.cairo b/tests/nodes/and_bool/input_1.cairo index 96fa7fe95..9eba836e3 100644 --- a/tests/nodes/and_bool/input_1.cairo +++ b/tests/nodes/and_bool/input_1.cairo @@ -11,14 +11,14 @@ fn input_1() -> Tensor { data.append(true); data.append(false); data.append(false); - data.append(false); - data.append(true); data.append(true); data.append(true); data.append(false); data.append(true); - data.append(true); - data.append(true); data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool/output_0.cairo b/tests/nodes/and_bool/output_0.cairo index 365e209c0..20c2ce71d 100644 --- a/tests/nodes/and_bool/output_0.cairo +++ b/tests/nodes/and_bool/output_0.cairo @@ -1,21 +1,21 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(4); let mut data = ArrayTrait::new(); - data.append(0); - 
data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/and_bool_broadcast.cairo b/tests/nodes/and_bool_broadcast.cairo index 839103063..dd58790b0 100644 --- a/tests/nodes/and_bool_broadcast.cairo +++ b/tests/nodes/and_bool_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/and_bool_broadcast/input_0.cairo b/tests/nodes/and_bool_broadcast/input_0.cairo index bb1a94372..63959e870 100644 --- a/tests/nodes/and_bool_broadcast/input_0.cairo +++ b/tests/nodes/and_bool_broadcast/input_0.cairo @@ -9,65 +9,65 @@ fn input_0() -> Tensor { shape.append(5); let mut data = ArrayTrait::new(); - data.append(false); data.append(true); data.append(false); - data.append(false); data.append(true); - data.append(false); - data.append(false); data.append(true); data.append(false); data.append(false); data.append(false); data.append(true); - data.append(false); - data.append(true); - data.append(false); data.append(true); data.append(false); data.append(false); data.append(true); data.append(true); - data.append(true); + data.append(false); data.append(false); data.append(true); - data.append(true); + data.append(false); data.append(true); data.append(true); data.append(true); data.append(false); data.append(false); 
data.append(false); - data.append(false); - data.append(false); + data.append(true); data.append(true); data.append(false); data.append(false); data.append(false); + data.append(true); + data.append(true); + data.append(true); data.append(false); data.append(false); data.append(true); + data.append(true); + data.append(true); + data.append(true); data.append(false); data.append(false); data.append(false); data.append(true); + data.append(true); + data.append(false); data.append(false); data.append(false); data.append(true); data.append(true); + data.append(false); data.append(true); data.append(true); data.append(false); data.append(true); data.append(false); data.append(true); - data.append(false); data.append(true); data.append(true); - data.append(false); - data.append(false); data.append(true); data.append(true); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool_broadcast/input_1.cairo b/tests/nodes/and_bool_broadcast/input_1.cairo index 3dfd5a03e..f855f8f1f 100644 --- a/tests/nodes/and_bool_broadcast/input_1.cairo +++ b/tests/nodes/and_bool_broadcast/input_1.cairo @@ -10,63 +10,63 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(false); - data.append(true); - data.append(true); - data.append(true); + data.append(false); data.append(true); data.append(false); data.append(false); data.append(false); + data.append(false); data.append(true); data.append(false); + data.append(true); data.append(false); + data.append(true); data.append(false); data.append(true); + data.append(false); data.append(true); data.append(false); data.append(true); + data.append(true); + data.append(true); data.append(false); data.append(false); data.append(true); data.append(true); + data.append(false); data.append(true); data.append(false); data.append(true); + data.append(false); data.append(true); data.append(true); + data.append(false); data.append(true); data.append(true); - 
data.append(false); - data.append(false); data.append(true); data.append(true); - data.append(false); - data.append(false); - data.append(false); data.append(true); data.append(true); data.append(true); data.append(true); data.append(false); - data.append(true); + data.append(false); data.append(false); data.append(true); data.append(false); data.append(false); - data.append(true); data.append(false); - data.append(true); - data.append(true); data.append(false); data.append(false); - data.append(true); data.append(false); data.append(true); - data.append(true); data.append(false); data.append(false); - data.append(true); data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); data.append(false); data.append(true); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/and_bool_broadcast/output_0.cairo b/tests/nodes/and_bool_broadcast/output_0.cairo index 0b1b699f0..583a3ddab 100644 --- a/tests/nodes/and_bool_broadcast/output_0.cairo +++ b/tests/nodes/and_bool_broadcast/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(4); @@ -11,34 +11,42 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(0); data.append(0); data.append(0); data.append(1); + data.append(0); + data.append(0); + data.append(0); + 
data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); - data.append(1); - data.append(0); - data.append(0); data.append(0); data.append(0); data.append(0); @@ -58,17 +66,9 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(1); - data.append(0); - data.append(0); data.append(1); - data.append(0); data.append(1); data.append(0); data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16.cairo b/tests/nodes/equal_fp16x16.cairo index 65c0a784f..0e2796da9 100644 --- a/tests/nodes/equal_fp16x16.cairo +++ b/tests/nodes/equal_fp16x16.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp16x16/input_0.cairo b/tests/nodes/equal_fp16x16/input_0.cairo index 80fc69258..fa5ce11ff 100644 --- a/tests/nodes/equal_fp16x16/input_0.cairo +++ b/tests/nodes/equal_fp16x16/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); 
+ data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/equal_fp16x16/input_1.cairo b/tests/nodes/equal_fp16x16/input_1.cairo index 28be847cd..13878efc5 100644 --- a/tests/nodes/equal_fp16x16/input_1.cairo +++ b/tests/nodes/equal_fp16x16/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 
131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/output_0.cairo b/tests/nodes/equal_fp16x16/output_0.cairo index ac7620e41..102a86146 100644 --- a/tests/nodes/equal_fp16x16/output_0.cairo +++ b/tests/nodes/equal_fp16x16/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,13 +13,15 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(1); @@ -27,15 +29,13 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(0); data.append(0); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast.cairo b/tests/nodes/equal_fp16x16_broadcast.cairo index b01301226..d5247e8ca 100644 --- a/tests/nodes/equal_fp16x16_broadcast.cairo +++ b/tests/nodes/equal_fp16x16_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo index 63bb6f2bd..2be78eac4 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo index 62fb47cf7..1d2d646c2 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo index 1b2b1839c..6e83f693f 100644 --- a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/equal_fp8x23.cairo b/tests/nodes/equal_fp8x23.cairo index 2cb21620e..1f5c1a9dc 100644 --- a/tests/nodes/equal_fp8x23.cairo +++ b/tests/nodes/equal_fp8x23.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp8x23/input_0.cairo b/tests/nodes/equal_fp8x23/input_0.cairo index e4da062fe..55009dd09 100644 --- a/tests/nodes/equal_fp8x23/input_0.cairo +++ b/tests/nodes/equal_fp8x23/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - 
data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/equal_fp8x23/input_1.cairo b/tests/nodes/equal_fp8x23/input_1.cairo index dd0be993a..fe7d75198 100644 --- a/tests/nodes/equal_fp8x23/input_1.cairo +++ b/tests/nodes/equal_fp8x23/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + 
data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/output_0.cairo b/tests/nodes/equal_fp8x23/output_0.cairo index 07372d8e0..e3cabff16 100644 --- a/tests/nodes/equal_fp8x23/output_0.cairo +++ b/tests/nodes/equal_fp8x23/output_0.cairo @@ -1,19 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -26,16 +25,17 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast.cairo b/tests/nodes/equal_fp8x23_broadcast.cairo index f21904ba0..83d41e98d 100644 --- a/tests/nodes/equal_fp8x23_broadcast.cairo +++ b/tests/nodes/equal_fp8x23_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo index b3a74749a..d8bc8c715 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo index 3d5d07d10..d956bca2e 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo @@ -10,6 +10,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo index 705f7ffc6..f90460c3d 100644 --- a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo +++ 
b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo @@ -1,15 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/equal_i32.cairo b/tests/nodes/equal_i32.cairo index 2f86f4afd..176a2566f 100644 --- a/tests/nodes/equal_i32.cairo +++ b/tests/nodes/equal_i32.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_i32/input_0.cairo b/tests/nodes/equal_i32/input_0.cairo index 9dcfbae82..78e20a27c 100644 --- a/tests/nodes/equal_i32/input_0.cairo +++ b/tests/nodes/equal_i32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(-3); + data.append(-1); + data.append(-2); + data.append(2); + data.append(-1); data.append(-2); data.append(-1); + data.append(-2); data.append(-3); data.append(-1); + data.append(2); data.append(0); + data.append(1); data.append(0); - data.append(-3); - data.append(-3); data.append(0); - data.append(-2); - data.append(-2); - data.append(-2); data.append(-3); data.append(1); - 
data.append(-1); data.append(0); + data.append(2); data.append(-2); data.append(2); + data.append(-2); data.append(-3); - data.append(-1); data.append(-2); data.append(-1); - data.append(-2); data.append(-1); - data.append(2); - data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/input_1.cairo b/tests/nodes/equal_i32/input_1.cairo index e1863a501..5b85fd8c3 100644 --- a/tests/nodes/equal_i32/input_1.cairo +++ b/tests/nodes/equal_i32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(0); data.append(1); - data.append(-1); - data.append(2); - data.append(2); data.append(1); - data.append(0); data.append(-1); data.append(-3); + data.append(1); + data.append(-2); data.append(-1); + data.append(1); data.append(-3); + data.append(0); data.append(-1); - data.append(-3); - data.append(-3); + data.append(1); + data.append(-1); + data.append(1); data.append(0); data.append(1); + data.append(-2); + data.append(-2); + data.append(-2); data.append(-3); data.append(-3); data.append(-3); + data.append(2); + data.append(0); + data.append(0); data.append(-3); data.append(-3); - data.append(1); - data.append(-3); - data.append(2); - data.append(-2); - data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/output_0.cairo b/tests/nodes/equal_i32/output_0.cairo index e4fb77e12..b20beee14 100644 --- a/tests/nodes/equal_i32/output_0.cairo +++ b/tests/nodes/equal_i32/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -15,9 +15,10 @@ fn output_0() -> 
Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); + data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -29,7 +30,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/equal_i32_broadcast.cairo b/tests/nodes/equal_i32_broadcast.cairo index f21bed4f5..348d93461 100644 --- a/tests/nodes/equal_i32_broadcast.cairo +++ b/tests/nodes/equal_i32_broadcast.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_i32_broadcast/input_0.cairo b/tests/nodes/equal_i32_broadcast/input_0.cairo index 2e440937c..03adb3015 100644 --- a/tests/nodes/equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/equal_i32_broadcast/input_0.cairo @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(-2); - data.append(-1); - data.append(-1); + data.append(1); + data.append(0); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/input_1.cairo b/tests/nodes/equal_i32_broadcast/input_1.cairo index d0bca2c7f..26cafa62e 100644 --- a/tests/nodes/equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/equal_i32_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(-2); + 
data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/output_0.cairo b/tests/nodes/equal_i32_broadcast/output_0.cairo index ce4201837..6e83f693f 100644 --- a/tests/nodes/equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/equal_i32_broadcast/output_0.cairo @@ -1,15 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); + data.append(0); data.append(1); data.append(0); data.append(0); diff --git a/tests/nodes/equal_i8.cairo b/tests/nodes/equal_i8.cairo index 3f3d3661d..1c2ac5eda 100644 --- a/tests/nodes/equal_i8.cairo +++ b/tests/nodes/equal_i8.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_i8/input_0.cairo b/tests/nodes/equal_i8/input_0.cairo index 281102a79..a5aecfe37 100644 --- a/tests/nodes/equal_i8/input_0.cairo +++ b/tests/nodes/equal_i8/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(-1); - data.append(-1); - data.append(1); 
data.append(-2); data.append(2); - data.append(-2); - data.append(-1); + data.append(2); data.append(1); + data.append(2); data.append(-3); - data.append(-1); data.append(-2); + data.append(2); + data.append(-1); + data.append(0); data.append(1); data.append(-1); - data.append(-3); + data.append(2); + data.append(-2); data.append(1); data.append(-3); - data.append(-3); - data.append(0); - data.append(-3); - data.append(-1); - data.append(-1); data.append(-1); data.append(0); - data.append(-3); - data.append(-1); - data.append(-1); + data.append(-2); + data.append(1); + data.append(2); + data.append(1); + data.append(2); + data.append(-2); + data.append(2); + data.append(1); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/input_1.cairo b/tests/nodes/equal_i8/input_1.cairo index 3e13a87c6..9348c6f87 100644 --- a/tests/nodes/equal_i8/input_1.cairo +++ b/tests/nodes/equal_i8/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(-1); - data.append(-3); data.append(0); data.append(-2); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(2); + data.append(-2); + data.append(-2); data.append(0); data.append(2); + data.append(-3); data.append(-1); data.append(1); + data.append(-3); + data.append(1); + data.append(0); + data.append(1); + data.append(-2); + data.append(0); data.append(1); - data.append(2); data.append(0); data.append(-2); data.append(-3); + data.append(0); data.append(-2); data.append(-1); - data.append(-3); - data.append(-3); - data.append(1); data.append(1); + data.append(2); + data.append(-1); data.append(-3); - data.append(-3); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/output_0.cairo b/tests/nodes/equal_i8/output_0.cairo index 7c4daa680..5cb81ce4d 100644 --- a/tests/nodes/equal_i8/output_0.cairo +++ 
b/tests/nodes/equal_i8/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,10 +11,8 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); @@ -22,7 +20,10 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); + data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -30,7 +31,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/equal_i8_broadcast.cairo b/tests/nodes/equal_i8_broadcast.cairo index b8f66a412..36d76b4e9 100644 --- a/tests/nodes/equal_i8_broadcast.cairo +++ b/tests/nodes/equal_i8_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_i8_broadcast/input_0.cairo b/tests/nodes/equal_i8_broadcast/input_0.cairo index 295af2729..b4d674e13 100644 --- 
a/tests/nodes/equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/equal_i8_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(1); - data.append(0); + data.append(-1); + data.append(-3); + data.append(2); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/input_1.cairo b/tests/nodes/equal_i8_broadcast/input_1.cairo index aa5546ee5..e09cd290a 100644 --- a/tests/nodes/equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/equal_i8_broadcast/input_1.cairo @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-1); data.append(0); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/output_0.cairo b/tests/nodes/equal_i8_broadcast/output_0.cairo index 705f7ffc6..be93162e2 100644 --- a/tests/nodes/equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/equal_i8_broadcast/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); @@ -12,6 +12,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32.cairo b/tests/nodes/equal_u32.cairo index c53070394..b5dd76f1b 100644 --- a/tests/nodes/equal_u32.cairo +++ b/tests/nodes/equal_u32.cairo @@ -3,11 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_u32/input_0.cairo b/tests/nodes/equal_u32/input_0.cairo index bcdfa640a..1101fa40c 100644 --- a/tests/nodes/equal_u32/input_0.cairo +++ b/tests/nodes/equal_u32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(0); - data.append(3); - data.append(0); data.append(1); - data.append(3); data.append(1); - data.append(4); data.append(2); data.append(4); data.append(5); - data.append(4); - data.append(5); - data.append(5); + data.append(3); + data.append(1); data.append(0); + data.append(3); + data.append(4); + data.append(1); + data.append(2); data.append(4); + data.append(1); + data.append(2); data.append(3); data.append(3); data.append(0); + data.append(0); data.append(3); + data.append(5); data.append(1); data.append(5); - data.append(4); - data.append(4); - data.append(3); - data.append(4); + data.append(1); + data.append(1); data.append(5); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/input_1.cairo b/tests/nodes/equal_u32/input_1.cairo index d64072a1a..b9d3a0f2a 100644 --- a/tests/nodes/equal_u32/input_1.cairo +++ b/tests/nodes/equal_u32/input_1.cairo @@ -11,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(2); - data.append(2); data.append(1); + data.append(4); + data.append(4); + data.append(4); data.append(2); - data.append(3); data.append(2); data.append(4); - data.append(0); - data.append(3); - data.append(2); - data.append(0); - data.append(0); data.append(4); 
+ data.append(4); + data.append(5); data.append(3); data.append(2); data.append(4); - data.append(2); + data.append(3); data.append(0); data.append(2); - data.append(4); data.append(3); data.append(0); data.append(2); - data.append(5); + data.append(4); data.append(2); data.append(5); + data.append(4); + data.append(1); + data.append(5); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/output_0.cairo b/tests/nodes/equal_u32/output_0.cairo index c07a9a491..bf289dc4b 100644 --- a/tests/nodes/equal_u32/output_0.cairo +++ b/tests/nodes/equal_u32/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,10 +11,6 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(0); data.append(1); @@ -23,19 +19,23 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(1); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast.cairo b/tests/nodes/equal_u32_broadcast.cairo index 94e9022be..ffe162a58 100644 --- a/tests/nodes/equal_u32_broadcast.cairo +++ b/tests/nodes/equal_u32_broadcast.cairo @@ -3,11 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/equal_u32_broadcast/input_0.cairo b/tests/nodes/equal_u32_broadcast/input_0.cairo index f0fc27e10..2948b2d0f 100644 --- a/tests/nodes/equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/equal_u32_broadcast/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(4); + data.append(1); + data.append(5); + data.append(1); data.append(3); - data.append(4); - data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/output_0.cairo b/tests/nodes/equal_u32_broadcast/output_0.cairo index 705f7ffc6..0e89fbf7a 100644 --- a/tests/nodes/equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/equal_u32_broadcast/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/greater_equal_fp16x16.cairo b/tests/nodes/greater_equal_fp16x16.cairo index ade17fab9..c4115953d 100644 --- a/tests/nodes/greater_equal_fp16x16.cairo +++ b/tests/nodes/greater_equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_fp16x16/input_0.cairo b/tests/nodes/greater_equal_fp16x16/input_0.cairo index 2357065aa..8483baa87 100644 --- a/tests/nodes/greater_equal_fp16x16/input_0.cairo +++ b/tests/nodes/greater_equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 
131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16/input_1.cairo b/tests/nodes/greater_equal_fp16x16/input_1.cairo index 2aca7c1e4..71948a671 100644 --- a/tests/nodes/greater_equal_fp16x16/input_1.cairo +++ b/tests/nodes/greater_equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 
65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16/output_0.cairo b/tests/nodes/greater_equal_fp16x16/output_0.cairo index 3838fd015..6743e67ee 100644 --- a/tests/nodes/greater_equal_fp16x16/output_0.cairo +++ b/tests/nodes/greater_equal_fp16x16/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -10,31 +11,31 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); - data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast.cairo b/tests/nodes/greater_equal_fp16x16_broadcast.cairo index 88b0d8221..5a121c54d 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use 
orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo index d5b16c572..061494d06 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - 
data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo 
b/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo index 2f4253292..0ba9a94ab 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo index 0779d09f7..5c225df37 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo @@ -1,23 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); - data.append(0); data.append(1); data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(1); @@ -25,10 +20,12 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(1); 
data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(1); @@ -36,5 +33,9 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23.cairo b/tests/nodes/greater_equal_fp8x23.cairo index fe69d50b0..d2aaba160 100644 --- a/tests/nodes/greater_equal_fp8x23.cairo +++ b/tests/nodes/greater_equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_fp8x23/input_0.cairo b/tests/nodes/greater_equal_fp8x23/input_0.cairo index dc99936c5..e6be700e3 100644 --- a/tests/nodes/greater_equal_fp8x23/input_0.cairo +++ b/tests/nodes/greater_equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use 
orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, 
sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23/input_1.cairo b/tests/nodes/greater_equal_fp8x23/input_1.cairo index 181dad555..abba030c2 100644 --- a/tests/nodes/greater_equal_fp8x23/input_1.cairo +++ b/tests/nodes/greater_equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - 
data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23/output_0.cairo b/tests/nodes/greater_equal_fp8x23/output_0.cairo index 0102d6e42..d81121a87 100644 --- a/tests/nodes/greater_equal_fp8x23/output_0.cairo +++ b/tests/nodes/greater_equal_fp8x23/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,15 +12,11 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); data.append(1); data.append(0); data.append(1); data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); @@ -27,6 
+24,7 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(1); @@ -34,7 +32,10 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast.cairo b/tests/nodes/greater_equal_fp8x23_broadcast.cairo index 4cb89ba2a..76296131d 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo index fc181dce7..4d715888c 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -12,30 +12,30 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 
16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo index b776b414a..a0c9b6c37 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo index d8fc4b6d7..48282ca99 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -10,19 +11,15 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - 
data.append(0); - data.append(0); - data.append(1); - data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); @@ -31,10 +28,14 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(1); + data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); + data.append(1); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32.cairo b/tests/nodes/greater_equal_i32.cairo index 7fde407d8..f3322db91 100644 --- a/tests/nodes/greater_equal_i32.cairo +++ b/tests/nodes/greater_equal_i32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_i32/input_0.cairo b/tests/nodes/greater_equal_i32/input_0.cairo index 462799e9e..1f121a788 100644 --- a/tests/nodes/greater_equal_i32/input_0.cairo +++ b/tests/nodes/greater_equal_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(-2); - data.append(-1); - data.append(0); - data.append(-2); - data.append(-2); - data.append(-1); data.append(-1); data.append(2); data.append(-1); - data.append(-2); - data.append(0); + data.append(-1); data.append(2); data.append(-3); - data.append(2); data.append(-2); + data.append(-3); + data.append(-3); + data.append(-1); data.append(1); + data.append(-3); + data.append(2); + data.append(2); + data.append(2); data.append(0); - data.append(-1); + data.append(-2); + data.append(-2); data.append(0); - data.append(-3); data.append(0); - data.append(-3); data.append(-2); - data.append(1); + data.append(-2); data.append(-1); - data.append(2); + data.append(-1); + data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32/input_1.cairo b/tests/nodes/greater_equal_i32/input_1.cairo index 71c047607..63912b68e 100644 --- a/tests/nodes/greater_equal_i32/input_1.cairo +++ b/tests/nodes/greater_equal_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); data.append(-2); + data.append(0); + data.append(0); data.append(-3); + data.append(2); + data.append(-1); + data.append(-3); + data.append(-1); + data.append(-2); data.append(1); data.append(-3); 
data.append(-2); - data.append(0); + data.append(-1); + data.append(-2); data.append(-1); data.append(1); + data.append(-2); data.append(-3); - data.append(-3); - data.append(2); data.append(1); - data.append(0); - data.append(-3); - data.append(-1); - data.append(-2); data.append(2); - data.append(0); - data.append(-2); data.append(2); - data.append(1); + data.append(-3); + data.append(-2); data.append(-1); + data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32/output_0.cairo b/tests/nodes/greater_equal_i32/output_0.cairo index f511f554d..5ba6fc6fc 100644 --- a/tests/nodes/greater_equal_i32/output_0.cairo +++ b/tests/nodes/greater_equal_i32/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); data.append(1); data.append(0); data.append(1); @@ -18,22 +17,24 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(1); - data.append(0); data.append(1); - data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(0); data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/greater_equal_i32_broadcast.cairo b/tests/nodes/greater_equal_i32_broadcast.cairo 
index 3d5663ed1..51e7dce37 100644 --- a/tests/nodes/greater_equal_i32_broadcast.cairo +++ b/tests/nodes/greater_equal_i32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_i32_broadcast/input_0.cairo b/tests/nodes/greater_equal_i32_broadcast/input_0.cairo index b65e8387d..0e2bb0a87 100644 --- a/tests/nodes/greater_equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(-3); + data.append(0); + data.append(2); + data.append(1); + data.append(-3); + data.append(-1); + data.append(1); data.append(0); data.append(0); + data.append(-2); data.append(-1); + data.append(1); data.append(-1); - 
data.append(2); - data.append(2); data.append(-1); - data.append(2); - data.append(0); data.append(-1); data.append(2); data.append(-2); data.append(1); data.append(2); data.append(1); - data.append(-1); - data.append(-1); - data.append(-3); - data.append(-2); - data.append(-2); data.append(1); - data.append(0); + data.append(1); data.append(-1); - data.append(-2); - data.append(-3); - data.append(-2); - data.append(-2); + data.append(2); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32_broadcast/input_1.cairo b/tests/nodes/greater_equal_i32_broadcast/input_1.cairo index 05ca97a1c..60c233254 100644 --- a/tests/nodes/greater_equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(-2); + data.append(1); + data.append(-1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32_broadcast/output_0.cairo b/tests/nodes/greater_equal_i32_broadcast/output_0.cairo index 4b42da2bc..78caa692e 100644 --- a/tests/nodes/greater_equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_i32_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = 
ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -10,30 +11,30 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); - data.append(1); + data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(1); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/greater_equal_i8.cairo b/tests/nodes/greater_equal_i8.cairo index 7e408313f..eacd7c574 100644 --- a/tests/nodes/greater_equal_i8.cairo +++ b/tests/nodes/greater_equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git 
a/tests/nodes/greater_equal_i8/input_0.cairo b/tests/nodes/greater_equal_i8/input_0.cairo index 029abdd11..7158fd825 100644 --- a/tests/nodes/greater_equal_i8/input_0.cairo +++ b/tests/nodes/greater_equal_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); - data.append(-2); + data.append(-1); data.append(2); data.append(1); + data.append(-3); data.append(0); + data.append(-2); + data.append(-2); + data.append(-2); + data.append(-1); data.append(-3); + data.append(1); data.append(2); - data.append(-3); data.append(-2); data.append(-3); - data.append(2); - data.append(-1); + data.append(-3); + data.append(-2); data.append(0); + data.append(2); data.append(-1); data.append(0); - data.append(1); - data.append(-2); - data.append(-2); - data.append(-1); - data.append(-2); data.append(-2); data.append(-3); - data.append(1); - data.append(1); data.append(0); - data.append(1); + data.append(0); + data.append(-3); data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8/input_1.cairo b/tests/nodes/greater_equal_i8/input_1.cairo index a13f4e52b..2d4f3f080 100644 --- a/tests/nodes/greater_equal_i8/input_1.cairo +++ b/tests/nodes/greater_equal_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = 
ArrayTrait::new(); - data.append(-1); - data.append(-2); - data.append(-3); data.append(1); - data.append(0); - data.append(-3); - data.append(0); data.append(-3); - data.append(-2); data.append(-1); + data.append(1); data.append(-1); data.append(2); + data.append(-1); data.append(-3); data.append(1); + data.append(-1); + data.append(1); + data.append(2); + data.append(0); data.append(1); data.append(1); data.append(-2); - data.append(2); - data.append(2); - data.append(-1); - data.append(-1); + data.append(-3); data.append(-3); data.append(-1); + data.append(-2); + data.append(2); + data.append(-2); + data.append(-1); data.append(0); data.append(-1); + data.append(1); data.append(2); - data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8/output_0.cairo b/tests/nodes/greater_equal_i8/output_0.cairo index 6166962c9..989556af5 100644 --- a/tests/nodes/greater_equal_i8/output_0.cairo +++ b/tests/nodes/greater_equal_i8/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,11 +14,8 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); + data.append(0); + data.append(0); data.append(0); data.append(1); data.append(0); @@ -25,8 +23,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(0); data.append(0); data.append(0); data.append(0); @@ -35,6 +31,11 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + 
data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8_broadcast.cairo b/tests/nodes/greater_equal_i8_broadcast.cairo index 53ffd4c26..287f60b66 100644 --- a/tests/nodes/greater_equal_i8_broadcast.cairo +++ b/tests/nodes/greater_equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_i8_broadcast/input_0.cairo b/tests/nodes/greater_equal_i8_broadcast/input_0.cairo index 721b747ee..cbc4f5a1e 100644 --- a/tests/nodes/greater_equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,30 +12,30 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); 
data.append(-2); data.append(0); + data.append(2); + data.append(2); + data.append(1); + data.append(-1); + data.append(-1); data.append(-3); - data.append(-2); data.append(0); + data.append(1); + data.append(-3); data.append(-3); data.append(1); - data.append(-1); - data.append(0); - data.append(2); - data.append(0); data.append(2); + data.append(-2); data.append(2); + data.append(0); data.append(-3); - data.append(-1); + data.append(1); data.append(0); data.append(0); - data.append(-3); data.append(2); - data.append(-2); - data.append(-3); data.append(2); - data.append(1); - data.append(-1); data.append(-1); - data.append(1); + data.append(-2); data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8_broadcast/input_1.cairo b/tests/nodes/greater_equal_i8_broadcast/input_1.cairo index acb5683ff..7b6f77e67 100644 --- a/tests/nodes/greater_equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(-3); + data.append(-2); data.append(2); - data.append(2); - data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8_broadcast/output_0.cairo b/tests/nodes/greater_equal_i8_broadcast/output_0.cairo index d1b238ebc..38f6a5fdf 100644 --- a/tests/nodes/greater_equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_i8_broadcast/output_0.cairo @@ -1,17 +1,21 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, 
U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -19,22 +23,19 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); data.append(1); data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32.cairo b/tests/nodes/greater_equal_u32.cairo index e548d6cd6..2cf132189 100644 --- a/tests/nodes/greater_equal_u32.cairo +++ b/tests/nodes/greater_equal_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = 
input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_u32/input_0.cairo b/tests/nodes/greater_equal_u32/input_0.cairo index 5a6f968eb..972937864 100644 --- a/tests/nodes/greater_equal_u32/input_0.cairo +++ b/tests/nodes/greater_equal_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(4); data.append(5); - data.append(4); - data.append(3); - data.append(1); - data.append(0); data.append(0); data.append(5); + data.append(3); + data.append(3); + data.append(1); data.append(1); data.append(2); - data.append(4); - data.append(3); + data.append(1); data.append(2); + data.append(0); + data.append(3); data.append(4); - data.append(5); data.append(4); - data.append(0); data.append(1); + data.append(0); data.append(2); data.append(0); - data.append(5); data.append(1); - data.append(0); data.append(5); + data.append(2); data.append(4); - data.append(1); + data.append(3); + data.append(4); + data.append(5); + data.append(4); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32/input_1.cairo b/tests/nodes/greater_equal_u32/input_1.cairo index 1130efb80..5af7f9955 100644 --- a/tests/nodes/greater_equal_u32/input_1.cairo +++ b/tests/nodes/greater_equal_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let 
mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(2); + data.append(3); + data.append(2); data.append(0); - data.append(4); data.append(5); data.append(5); - data.append(4); - data.append(1); - data.append(1); - data.append(5); - data.append(0); - data.append(3); - data.append(3); + data.append(2); + data.append(2); + data.append(2); data.append(1); data.append(0); - data.append(0); + data.append(2); + data.append(5); data.append(5); - data.append(0); data.append(4); - data.append(1); - data.append(1); data.append(4); data.append(5); + data.append(5); + data.append(5); + data.append(0); + data.append(1); data.append(1); + data.append(3); + data.append(5); data.append(0); - data.append(4); - data.append(4); - data.append(4); - data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32/output_0.cairo b/tests/nodes/greater_equal_u32/output_0.cairo index 3f8b7f407..712606141 100644 --- a/tests/nodes/greater_equal_u32/output_0.cairo +++ b/tests/nodes/greater_equal_u32/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -10,31 +11,31 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); + data.append(0); + data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); - data.append(1); - data.append(1); data.append(0); - 
data.append(1); data.append(0); data.append(0); - data.append(1); + data.append(0); data.append(1); data.append(0); data.append(1); data.append(1); + data.append(1); + data.append(1); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32_broadcast.cairo b/tests/nodes/greater_equal_u32_broadcast.cairo index 5d40f386b..e7b4cd338 100644 --- a/tests/nodes/greater_equal_u32_broadcast.cairo +++ b/tests/nodes/greater_equal_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater_equal(@input_1); + let y_0 = input_0.greater_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_equal_u32_broadcast/input_0.cairo b/tests/nodes/greater_equal_u32_broadcast/input_0.cairo index 5bd64a3fa..8ceb24b67 100644 --- a/tests/nodes/greater_equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = 
ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(3); data.append(1); + data.append(3); + data.append(3); + data.append(4); + data.append(2); data.append(2); data.append(4); - data.append(5); + data.append(4); + data.append(3); + data.append(0); data.append(1); data.append(5); + data.append(0); + data.append(0); + data.append(0); data.append(4); - data.append(1); data.append(3); data.append(1); data.append(3); - data.append(0); - data.append(5); - data.append(5); - data.append(4); - data.append(0); + data.append(3); data.append(5); data.append(0); - data.append(5); - data.append(2); data.append(4); data.append(4); data.append(2); data.append(1); - data.append(3); - data.append(1); - data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32_broadcast/input_1.cairo b/tests/nodes/greater_equal_u32_broadcast/input_1.cairo index 365ec35ab..d30f37f4d 100644 --- a/tests/nodes/greater_equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); + data.append(3); data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32_broadcast/output_0.cairo b/tests/nodes/greater_equal_u32_broadcast/output_0.cairo index d97dd0a71..cdc03dbeb 100644 --- a/tests/nodes/greater_equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_u32_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; 
use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -10,31 +11,31 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); + data.append(0); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(0); data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); - data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16.cairo b/tests/nodes/greater_fp16x16.cairo index 2a7a03143..9e4fcd4c2 100644 --- a/tests/nodes/greater_fp16x16.cairo +++ b/tests/nodes/greater_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_greater_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - 
let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_fp16x16/input_0.cairo b/tests/nodes/greater_fp16x16/input_0.cairo index 62da96aaf..3945db72b 100644 --- a/tests/nodes/greater_fp16x16/input_0.cairo +++ b/tests/nodes/greater_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); 
data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16/input_1.cairo b/tests/nodes/greater_fp16x16/input_1.cairo index ab8cd104f..e68118f8b 100644 --- a/tests/nodes/greater_fp16x16/input_1.cairo +++ b/tests/nodes/greater_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - 
data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16/output_0.cairo b/tests/nodes/greater_fp16x16/output_0.cairo index 48e9c4f38..e23a4e669 100644 --- a/tests/nodes/greater_fp16x16/output_0.cairo +++ b/tests/nodes/greater_fp16x16/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,28 +12,28 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); + data.append(1); data.append(0); data.append(0); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/greater_fp16x16_broadcast.cairo b/tests/nodes/greater_fp16x16_broadcast.cairo index 26a9933e4..1ae074724 100644 --- a/tests/nodes/greater_fp16x16_broadcast.cairo +++ b/tests/nodes/greater_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_greater_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = 
input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_fp16x16_broadcast/input_0.cairo b/tests/nodes/greater_fp16x16_broadcast/input_0.cairo index db67a5335..f62741a74 100644 --- a/tests/nodes/greater_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/greater_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16_broadcast/input_1.cairo b/tests/nodes/greater_fp16x16_broadcast/input_1.cairo index 1a7845d54..138779aa4 100644 --- a/tests/nodes/greater_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/greater_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, 
sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16_broadcast/output_0.cairo b/tests/nodes/greater_fp16x16_broadcast/output_0.cairo index 983d99cbd..0e89fbf7a 100644 --- a/tests/nodes/greater_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/greater_fp16x16_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); - data.append(1); + data.append(0); + data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23.cairo b/tests/nodes/greater_fp8x23.cairo index 235e35bb9..811be552b 100644 --- a/tests/nodes/greater_fp8x23.cairo +++ b/tests/nodes/greater_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = 
input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_fp8x23/input_0.cairo b/tests/nodes/greater_fp8x23/input_0.cairo index ab05d7c9d..103fae1dd 100644 --- a/tests/nodes/greater_fp8x23/input_0.cairo +++ b/tests/nodes/greater_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 
16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23/input_1.cairo b/tests/nodes/greater_fp8x23/input_1.cairo index 583b859dd..52bf5b265 100644 --- a/tests/nodes/greater_fp8x23/input_1.cairo +++ b/tests/nodes/greater_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { 
mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23/output_0.cairo b/tests/nodes/greater_fp8x23/output_0.cairo index 2c4aa7864..7c5b41ca7 100644 --- 
a/tests/nodes/greater_fp8x23/output_0.cairo +++ b/tests/nodes/greater_fp8x23/output_0.cairo @@ -1,36 +1,37 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(1); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); + data.append(0); data.append(0); data.append(1); + data.append(0); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); - data.append(1); - data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/greater_fp8x23_broadcast.cairo b/tests/nodes/greater_fp8x23_broadcast.cairo index d7fa1d5a8..7ace215fb 100644 --- a/tests/nodes/greater_fp8x23_broadcast.cairo +++ b/tests/nodes/greater_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn 
test_greater_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_fp8x23_broadcast/input_0.cairo b/tests/nodes/greater_fp8x23_broadcast/input_0.cairo index a18362086..ce997ece1 100644 --- a/tests/nodes/greater_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/greater_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -11,7 +11,7 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23_broadcast/input_1.cairo b/tests/nodes/greater_fp8x23_broadcast/input_1.cairo index 2bb7abce1..4f8364ca5 100644 --- a/tests/nodes/greater_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/greater_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: 
true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23_broadcast/output_0.cairo b/tests/nodes/greater_fp8x23_broadcast/output_0.cairo index 983d99cbd..f90460c3d 100644 --- a/tests/nodes/greater_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/greater_fp8x23_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); @@ -10,7 +11,7 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(0); - data.append(1); + data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32.cairo b/tests/nodes/greater_i32.cairo index 4d1ce8975..9d999181b 100644 --- a/tests/nodes/greater_i32.cairo +++ b/tests/nodes/greater_i32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_greater_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = 
output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_i32/input_0.cairo b/tests/nodes/greater_i32/input_0.cairo index c5a8c900a..e3f61960e 100644 --- a/tests/nodes/greater_i32/input_0.cairo +++ b/tests/nodes/greater_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); data.append(0); - data.append(-2); - data.append(-3); - data.append(-3); - data.append(2); - data.append(-1); - data.append(-3); data.append(1); - data.append(-3); + data.append(0); + data.append(2); data.append(2); data.append(0); data.append(-1); - data.append(1); - data.append(-1); + data.append(0); data.append(2); - data.append(1); - data.append(1); data.append(-2); - data.append(2); data.append(-1); - data.append(-2); data.append(0); + data.append(1); + data.append(1); + data.append(-3); + data.append(0); + data.append(-2); data.append(-3); + data.append(1); data.append(-3); + data.append(0); + data.append(1); + data.append(-1); data.append(2); data.append(0); + data.append(-2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32/input_1.cairo b/tests/nodes/greater_i32/input_1.cairo index 7f2ac7cf6..d2de3b6cd 100644 --- a/tests/nodes/greater_i32/input_1.cairo +++ b/tests/nodes/greater_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(0); data.append(-3); data.append(1); data.append(0); data.append(0); - data.append(-1); - data.append(-1); data.append(-2); - data.append(-1); - data.append(2); - data.append(1); - data.append(2); data.append(-3); - data.append(2); - data.append(2); + data.append(-2); + data.append(-2); + data.append(-2); + data.append(-2); data.append(0); - data.append(2); - data.append(2); + data.append(-2); + data.append(-2); + data.append(1); + data.append(1); + data.append(-2); + data.append(-2); data.append(0); data.append(-3); - data.append(-1); - data.append(-3); - data.append(0); data.append(-2); - data.append(1); + data.append(0); + data.append(0); + data.append(2); + data.append(0); data.append(-2); + data.append(-1); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32/output_0.cairo b/tests/nodes/greater_i32/output_0.cairo index 5375527f6..dc7cfb3af 100644 --- a/tests/nodes/greater_i32/output_0.cairo +++ b/tests/nodes/greater_i32/output_0.cairo @@ -1,25 +1,28 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); 
data.append(1); @@ -29,12 +32,10 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(1); data.append(0); data.append(1); + data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32_broadcast.cairo b/tests/nodes/greater_i32_broadcast.cairo index 7ebe716df..6642b10af 100644 --- a/tests/nodes/greater_i32_broadcast.cairo +++ b/tests/nodes/greater_i32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_greater_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_i32_broadcast/input_0.cairo b/tests/nodes/greater_i32_broadcast/input_0.cairo index ee130c085..b1c4959f8 100644 --- a/tests/nodes/greater_i32_broadcast/input_0.cairo +++ b/tests/nodes/greater_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> 
Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(0); data.append(-2); + data.append(1); + data.append(2); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32_broadcast/input_1.cairo b/tests/nodes/greater_i32_broadcast/input_1.cairo index 790fd4ff9..801e78f8e 100644 --- a/tests/nodes/greater_i32_broadcast/input_1.cairo +++ b/tests/nodes/greater_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); + data.append(2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32_broadcast/output_0.cairo b/tests/nodes/greater_i32_broadcast/output_0.cairo index 417a71252..6e83f693f 100644 --- a/tests/nodes/greater_i32_broadcast/output_0.cairo +++ b/tests/nodes/greater_i32_broadcast/output_0.cairo @@ -1,15 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/greater_i8.cairo b/tests/nodes/greater_i8.cairo index 1eec0cc13..b73f17d8f 100644 --- a/tests/nodes/greater_i8.cairo +++ 
b/tests/nodes/greater_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_i8/input_0.cairo b/tests/nodes/greater_i8/input_0.cairo index 0d74b0aee..00a84a925 100644 --- a/tests/nodes/greater_i8/input_0.cairo +++ b/tests/nodes/greater_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(2); data.append(1); - data.append(-2); + data.append(0); + data.append(0); + data.append(0); + data.append(-3); data.append(1); - data.append(-2); - data.append(-1); data.append(1); data.append(0); data.append(-3); - data.append(-2); - data.append(2); - data.append(0); data.append(-3); - data.append(-2); - data.append(2); - data.append(-1); - 
data.append(0); + data.append(-3); data.append(-3); data.append(1); data.append(0); - data.append(-2); - data.append(0); - data.append(-3); - data.append(-2); data.append(1); data.append(1); data.append(1); + data.append(2); data.append(-1); + data.append(2); + data.append(2); + data.append(1); + data.append(2); + data.append(2); + data.append(-1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8/input_1.cairo b/tests/nodes/greater_i8/input_1.cairo index 79831e085..e623f5495 100644 --- a/tests/nodes/greater_i8/input_1.cairo +++ b/tests/nodes/greater_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(2); data.append(-2); data.append(0); data.append(-1); + data.append(-3); + data.append(0); + data.append(0); data.append(1); + data.append(-3); + data.append(-3); data.append(1); + data.append(2); + data.append(0); + data.append(-3); + data.append(-3); data.append(-1); + data.append(1); data.append(2); + data.append(-2); data.append(2); - data.append(1); - data.append(1); data.append(-2); - data.append(1); - data.append(-1); data.append(-2); - data.append(1); - data.append(-1); + data.append(-2); + data.append(-2); data.append(2); + data.append(-2); data.append(1); - data.append(1); - data.append(-1); - data.append(2); - data.append(-1); - data.append(2); - data.append(-3); - data.append(2); - data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8/output_0.cairo b/tests/nodes/greater_i8/output_0.cairo index 3864fbbe4..076748be3 100644 --- 
a/tests/nodes/greater_i8/output_0.cairo +++ b/tests/nodes/greater_i8/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); data.append(0); - data.append(0); - data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(1); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8_broadcast.cairo b/tests/nodes/greater_i8_broadcast.cairo index c1acc3950..57b03f337 100644 --- a/tests/nodes/greater_i8_broadcast.cairo +++ b/tests/nodes/greater_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; -use 
orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_i8_broadcast/input_0.cairo b/tests/nodes/greater_i8_broadcast/input_0.cairo index 7fadef780..509936b85 100644 --- a/tests/nodes/greater_i8_broadcast/input_0.cairo +++ b/tests/nodes/greater_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-3); data.append(-3); data.append(0); + data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8_broadcast/input_1.cairo b/tests/nodes/greater_i8_broadcast/input_1.cairo index 2f8cf491d..74374511f 100644 --- a/tests/nodes/greater_i8_broadcast/input_1.cairo +++ b/tests/nodes/greater_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(2); + data.append(-2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/greater_i8_broadcast/output_0.cairo b/tests/nodes/greater_i8_broadcast/output_0.cairo index d2fab9fd0..6e83f693f 100644 --- a/tests/nodes/greater_i8_broadcast/output_0.cairo +++ b/tests/nodes/greater_i8_broadcast/output_0.cairo @@ -1,15 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/greater_u32.cairo b/tests/nodes/greater_u32.cairo index 2eb538434..71b0af1c1 100644 --- a/tests/nodes/greater_u32.cairo +++ b/tests/nodes/greater_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_greater_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_u32/input_0.cairo b/tests/nodes/greater_u32/input_0.cairo index b2ec6068a..cb20dbd6f 100644 --- 
a/tests/nodes/greater_u32/input_0.cairo +++ b/tests/nodes/greater_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(4); - data.append(3); - data.append(3); - data.append(5); - data.append(1); + data.append(0); data.append(2); data.append(3); - data.append(1); - data.append(5); - data.append(5); - data.append(5); data.append(3); - data.append(4); - data.append(0); data.append(2); data.append(2); + data.append(0); + data.append(0); + data.append(2); + data.append(0); data.append(4); data.append(1); + data.append(1); data.append(2); - data.append(2); - data.append(0); data.append(1); - data.append(3); - data.append(4); + data.append(1); + data.append(0); + data.append(2); data.append(4); data.append(3); + data.append(1); + data.append(3); + data.append(1); + data.append(2); + data.append(0); data.append(5); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32/input_1.cairo b/tests/nodes/greater_u32/input_1.cairo index 2346c043c..7a4ea0c19 100644 --- a/tests/nodes/greater_u32/input_1.cairo +++ b/tests/nodes/greater_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(4); data.append(0); data.append(2); - data.append(2); - 
data.append(4); - data.append(4); - data.append(1); data.append(0); - data.append(0); - data.append(4); - data.append(2); - data.append(2); + data.append(3); data.append(5); + data.append(0); + data.append(3); + data.append(3); data.append(1); - data.append(4); - data.append(4); + data.append(2); data.append(1); - data.append(5); - data.append(5); data.append(3); + data.append(1); + data.append(4); data.append(3); data.append(2); - data.append(4); data.append(3); + data.append(2); data.append(3); + data.append(0); + data.append(0); + data.append(5); data.append(1); + data.append(2); + data.append(0); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32/output_0.cairo b/tests/nodes/greater_u32/output_0.cairo index 4ff4b7ffe..a7a8c7b52 100644 --- a/tests/nodes/greater_u32/output_0.cairo +++ b/tests/nodes/greater_u32/output_0.cairo @@ -1,27 +1,24 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); - data.append(1); + data.append(0); data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); data.append(0); @@ -32,9 +29,13 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(0); data.append(1); + data.append(1); + data.append(0); data.append(0); + data.append(0); + data.append(0); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32_broadcast.cairo 
b/tests/nodes/greater_u32_broadcast.cairo index febcc1674..3c15cded9 100644 --- a/tests/nodes/greater_u32_broadcast.cairo +++ b/tests/nodes/greater_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_greater_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.greater(@input_1); + let y_0 = input_0.greater(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/greater_u32_broadcast/input_0.cairo b/tests/nodes/greater_u32_broadcast/input_0.cairo index 972de6b6b..44d87a588 100644 --- a/tests/nodes/greater_u32_broadcast/input_0.cairo +++ b/tests/nodes/greater_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(0); + data.append(4); + data.append(3); data.append(4); - data.append(5); - data.append(2); - data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32_broadcast/input_1.cairo b/tests/nodes/greater_u32_broadcast/input_1.cairo 
index 2831d01cc..291c107db 100644 --- a/tests/nodes/greater_u32_broadcast/input_1.cairo +++ b/tests/nodes/greater_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(3); + data.append(1); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32_broadcast/output_0.cairo b/tests/nodes/greater_u32_broadcast/output_0.cairo index 7e2cee38d..4818b6836 100644 --- a/tests/nodes/greater_u32_broadcast/output_0.cairo +++ b/tests/nodes/greater_u32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16.cairo b/tests/nodes/less_equal_fp16x16.cairo index 19fe42d2d..fc14de86f 100644 --- a/tests/nodes/less_equal_fp16x16.cairo +++ b/tests/nodes/less_equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use 
orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp16x16/input_0.cairo b/tests/nodes/less_equal_fp16x16/input_0.cairo index ec0027d22..e98ef6e2f 100644 --- a/tests/nodes/less_equal_fp16x16/input_0.cairo +++ b/tests/nodes/less_equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16/input_1.cairo b/tests/nodes/less_equal_fp16x16/input_1.cairo index 5cc407dc5..8d9a605d9 100644 --- a/tests/nodes/less_equal_fp16x16/input_1.cairo +++ b/tests/nodes/less_equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16/output_0.cairo b/tests/nodes/less_equal_fp16x16/output_0.cairo index 7e2cee38d..f90460c3d 100644 --- a/tests/nodes/less_equal_fp16x16/output_0.cairo +++ b/tests/nodes/less_equal_fp16x16/output_0.cairo @@ -1,15 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(1); + data.append(0); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_fp16x16_broadcast.cairo b/tests/nodes/less_equal_fp16x16_broadcast.cairo index 6ca29eb78..27c3101db 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo index 13261de0d..cf2986fd4 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo 
b/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo index 25a595400..e630cb30c 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo index 31cf673d4..5614176ce 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23.cairo b/tests/nodes/less_equal_fp8x23.cairo index 3ee472dce..7d0aa2e23 100644 --- a/tests/nodes/less_equal_fp8x23.cairo +++ b/tests/nodes/less_equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, 
U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; -use orion::operators::tensor::U32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp8x23/input_0.cairo b/tests/nodes/less_equal_fp8x23/input_0.cairo index cac7e356e..b51a0b8d0 100644 --- a/tests/nodes/less_equal_fp8x23/input_0.cairo +++ b/tests/nodes/less_equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23/input_1.cairo b/tests/nodes/less_equal_fp8x23/input_1.cairo index 6a5e5a086..cfb564ed9 100644 --- 
a/tests/nodes/less_equal_fp8x23/input_1.cairo +++ b/tests/nodes/less_equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23/output_0.cairo b/tests/nodes/less_equal_fp8x23/output_0.cairo index 07948a48e..4818b6836 100644 --- a/tests/nodes/less_equal_fp8x23/output_0.cairo +++ b/tests/nodes/less_equal_fp8x23/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(1); data.append(0); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast.cairo b/tests/nodes/less_equal_fp8x23_broadcast.cairo index 8cf36a6ba..7a5fb95cd 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; -use orion::operators::tensor::U32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo index 597e948e1..fc3ed6319 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo index 6a7c55548..74307c4f3 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,6 +10,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo index 62010885f..085034f13 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo @@ -1,15 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i32.cairo b/tests/nodes/less_equal_i32.cairo index 3072a59b0..be4c222d1 100644 --- a/tests/nodes/less_equal_i32.cairo +++ b/tests/nodes/less_equal_i32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use 
orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i32/input_0.cairo b/tests/nodes/less_equal_i32/input_0.cairo index 11c8e73ff..72ef4d47d 100644 --- a/tests/nodes/less_equal_i32/input_0.cairo +++ b/tests/nodes/less_equal_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); - data.append(-3); data.append(-2); - data.append(1); + data.append(-2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32/input_1.cairo b/tests/nodes/less_equal_i32/input_1.cairo index 330426cd7..c9ebd1f8a 100644 --- a/tests/nodes/less_equal_i32/input_1.cairo +++ b/tests/nodes/less_equal_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn 
input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); - data.append(2); data.append(-3); - data.append(1); + data.append(-1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32/output_0.cairo b/tests/nodes/less_equal_i32/output_0.cairo index 8442d0d0c..87ee1df2e 100644 --- a/tests/nodes/less_equal_i32/output_0.cairo +++ b/tests/nodes/less_equal_i32/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(1); data.append(0); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast.cairo b/tests/nodes/less_equal_i32_broadcast.cairo index 3657b38d9..2b3e4f406 100644 --- a/tests/nodes/less_equal_i32_broadcast.cairo +++ b/tests/nodes/less_equal_i32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = 
output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i32_broadcast/input_0.cairo b/tests/nodes/less_equal_i32_broadcast/input_0.cairo index 9f1d44f37..78aa6b0a0 100644 --- a/tests/nodes/less_equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,8 +9,8 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(1); + data.append(2); + data.append(-1); data.append(-3); data.append(-2); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i32_broadcast/input_1.cairo b/tests/nodes/less_equal_i32_broadcast/input_1.cairo index dc3c54f94..d810c8dc3 100644 --- a/tests/nodes/less_equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast/output_0.cairo b/tests/nodes/less_equal_i32_broadcast/output_0.cairo index 31cf673d4..b066124bb 100644 --- a/tests/nodes/less_equal_i32_broadcast/output_0.cairo +++ 
b/tests/nodes/less_equal_i32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8.cairo b/tests/nodes/less_equal_i8.cairo index c86a70ec1..80b1ae428 100644 --- a/tests/nodes/less_equal_i8.cairo +++ b/tests/nodes/less_equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::I8TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i8/input_0.cairo b/tests/nodes/less_equal_i8/input_0.cairo index 4f53a978e..c4c530cde 100644 --- a/tests/nodes/less_equal_i8/input_0.cairo +++ 
b/tests/nodes/less_equal_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-1); data.append(-1); data.append(-2); + data.append(-2); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8/input_1.cairo b/tests/nodes/less_equal_i8/input_1.cairo index 6cb982144..184156e2a 100644 --- a/tests/nodes/less_equal_i8/input_1.cairo +++ b/tests/nodes/less_equal_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(-2); + data.append(-3); data.append(1); + data.append(-1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8/output_0.cairo b/tests/nodes/less_equal_i8/output_0.cairo index 31cf673d4..eabe7662b 100644 --- a/tests/nodes/less_equal_i8/output_0.cairo +++ b/tests/nodes/less_equal_i8/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); 
shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast.cairo b/tests/nodes/less_equal_i8_broadcast.cairo index ac53e3aa6..33afe5af8 100644 --- a/tests/nodes/less_equal_i8_broadcast.cairo +++ b/tests/nodes/less_equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::I8TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i8_broadcast/input_0.cairo b/tests/nodes/less_equal_i8_broadcast/input_0.cairo index 835e66354..288365601 100644 --- a/tests/nodes/less_equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 
@@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(2); - data.append(2); data.append(-3); + data.append(-3); + data.append(-3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast/input_1.cairo b/tests/nodes/less_equal_i8_broadcast/input_1.cairo index 02ff8facd..0f1ad9249 100644 --- a/tests/nodes/less_equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-3); + data.append(-2); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast/output_0.cairo b/tests/nodes/less_equal_i8_broadcast/output_0.cairo index 9a2391c78..0367c57b6 100644 --- a/tests/nodes/less_equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_i8_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32.cairo b/tests/nodes/less_equal_u32.cairo index 
8a1e7aab4..acc3ac6e5 100644 --- a/tests/nodes/less_equal_u32.cairo +++ b/tests/nodes/less_equal_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_u32/input_0.cairo b/tests/nodes/less_equal_u32/input_0.cairo index 84b61d7cc..d8497931b 100644 --- a/tests/nodes/less_equal_u32/input_0.cairo +++ b/tests/nodes/less_equal_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); + data.append(0); data.append(5); + data.append(1); data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32/input_1.cairo b/tests/nodes/less_equal_u32/input_1.cairo index fe6539464..652fdcffd 100644 --- a/tests/nodes/less_equal_u32/input_1.cairo +++ 
b/tests/nodes/less_equal_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(4); + data.append(1); + data.append(2); data.append(5); - data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32/output_0.cairo b/tests/nodes/less_equal_u32/output_0.cairo index de313d890..085034f13 100644 --- a/tests/nodes/less_equal_u32/output_0.cairo +++ b/tests/nodes/less_equal_u32/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); data.append(1); data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast.cairo b/tests/nodes/less_equal_u32_broadcast.cairo index dc695687d..e5d6b43ab 100644 --- a/tests/nodes/less_equal_u32_broadcast.cairo +++ b/tests/nodes/less_equal_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_u32_broadcast/input_0.cairo b/tests/nodes/less_equal_u32_broadcast/input_0.cairo index a6bf00a7c..b6a6ddbcf 100644 --- a/tests/nodes/less_equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(1); + data.append(1); data.append(0); - data.append(5); - data.append(4); - data.append(0); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast/input_1.cairo b/tests/nodes/less_equal_u32_broadcast/input_1.cairo index 8e7328b81..591525d47 100644 --- a/tests/nodes/less_equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor 
{ shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); + data.append(2); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast/output_0.cairo b/tests/nodes/less_equal_u32_broadcast/output_0.cairo index ef770fa07..897d076d9 100644 --- a/tests/nodes/less_equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_u32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); - data.append(0); data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16.cairo b/tests/nodes/less_fp16x16.cairo index d2d62e5bc..ed163c56f 100644 --- a/tests/nodes/less_fp16x16.cairo +++ b/tests/nodes/less_fp16x16.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp16x16/input_0.cairo 
b/tests/nodes/less_fp16x16/input_0.cairo index 196bfa800..9082c0ec3 100644 --- a/tests/nodes/less_fp16x16/input_0.cairo +++ b/tests/nodes/less_fp16x16/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); 
data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/input_1.cairo b/tests/nodes/less_fp16x16/input_1.cairo index 417f0b2c9..bc42d9df3 100644 --- a/tests/nodes/less_fp16x16/input_1.cairo +++ b/tests/nodes/less_fp16x16/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 
{ mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/output_0.cairo b/tests/nodes/less_fp16x16/output_0.cairo index a63249875..1b19d050d 100644 --- a/tests/nodes/less_fp16x16/output_0.cairo +++ b/tests/nodes/less_fp16x16/output_0.cairo @@ -1,41 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); + data.append(0); data.append(0); data.append(0); data.append(0); data.append(1); + data.append(0); data.append(1); data.append(1); - data.append(0); - data.append(0); data.append(1); data.append(0); + data.append(1); data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); + data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); 
data.append(0); data.append(0); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast.cairo b/tests/nodes/less_fp16x16_broadcast.cairo index 300ce8633..2866d6378 100644 --- a/tests/nodes/less_fp16x16_broadcast.cairo +++ b/tests/nodes/less_fp16x16_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_fp16x16_broadcast/input_0.cairo index 6f1019bfb..bc61eeea2 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 
65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_fp16x16_broadcast/input_1.cairo index e28fda5fc..303d93806 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_fp16x16_broadcast/output_0.cairo index e98ba8452..6f6745c19 100644 --- a/tests/nodes/less_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -12,20 +12,14 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); data.append(0); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(0); data.append(1); data.append(1); - data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); @@ -33,9 +27,15 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(0); + data.append(0); + data.append(0); data.append(1); + data.append(0); data.append(1); data.append(1); + data.append(1); + data.append(1); + data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23.cairo b/tests/nodes/less_fp8x23.cairo index a9e6a56f8..6a6b5e97e 100644 --- a/tests/nodes/less_fp8x23.cairo +++ b/tests/nodes/less_fp8x23.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp8x23/input_0.cairo b/tests/nodes/less_fp8x23/input_0.cairo index 78b7afc4c..4016e2d82 100644 --- a/tests/nodes/less_fp8x23/input_0.cairo +++ b/tests/nodes/less_fp8x23/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + 
data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/input_1.cairo b/tests/nodes/less_fp8x23/input_1.cairo index c406deb82..9a441b427 100644 --- a/tests/nodes/less_fp8x23/input_1.cairo +++ b/tests/nodes/less_fp8x23/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + 
data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/output_0.cairo b/tests/nodes/less_fp8x23/output_0.cairo index 8384ae7c3..744c7f5a4 100644 --- a/tests/nodes/less_fp8x23/output_0.cairo +++ b/tests/nodes/less_fp8x23/output_0.cairo @@ -1,19 +1,26 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); data.append(1); data.append(0); + data.append(1); + data.append(1); + data.append(1); + 
data.append(1); data.append(0); data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -22,20 +29,13 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(1); data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); - data.append(0); data.append(1); data.append(0); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast.cairo b/tests/nodes/less_fp8x23_broadcast.cairo index 8f30d9941..5423670f7 100644 --- a/tests/nodes/less_fp8x23_broadcast.cairo +++ b/tests/nodes/less_fp8x23_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_fp8x23_broadcast/input_0.cairo index b3dfbb2e4..5c0be3f96 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: 
true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_fp8x23_broadcast/input_1.cairo index 11c1e2841..c1b783165 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_fp8x23_broadcast/output_0.cairo index f05fe8301..b926f44e6 100644 --- a/tests/nodes/less_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/output_0.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -12,29 +12,29 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(0); data.append(0); data.append(0); data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); TensorTrait::new(shape.span(), 
data.span()) diff --git a/tests/nodes/less_i32.cairo b/tests/nodes/less_i32.cairo index f2648c188..a2eaffdd4 100644 --- a/tests/nodes/less_i32.cairo +++ b/tests/nodes/less_i32.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i32/input_0.cairo b/tests/nodes/less_i32/input_0.cairo index e534eab72..2797bb47f 100644 --- a/tests/nodes/less_i32/input_0.cairo +++ b/tests/nodes/less_i32/input_0.cairo @@ -10,30 +10,30 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(0); + data.append(1); data.append(-2); - data.append(-1); + data.append(1); + data.append(2); + data.append(-2); + data.append(1); data.append(-3); data.append(2); - data.append(0); - data.append(0); + data.append(1); data.append(-1); - data.append(-3); - data.append(0); - data.append(-3); + data.append(-2); + data.append(-1); + data.append(-1); + data.append(-1); + data.append(-2); + data.append(2); + data.append(1); + data.append(1); data.append(1); - data.append(0); data.append(1); data.append(-1); + data.append(-3); data.append(-1); data.append(0); - data.append(2); - data.append(0); - data.append(-1); - data.append(2); - data.append(2); - data.append(-2); data.append(0); data.append(0); data.append(1); diff --git a/tests/nodes/less_i32/input_1.cairo b/tests/nodes/less_i32/input_1.cairo index a783f55d8..69ac67e22 100644 --- a/tests/nodes/less_i32/input_1.cairo +++ b/tests/nodes/less_i32/input_1.cairo @@ -10,32 +10,32 @@ fn 
input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(-3); data.append(0); - data.append(-3); - data.append(-3); data.append(-2); - data.append(-3); - data.append(-3); + data.append(-1); data.append(2); + data.append(-1); + data.append(-2); data.append(0); + data.append(-3); data.append(1); + data.append(-1); + data.append(-2); data.append(-2); - data.append(1); data.append(0); - data.append(2); data.append(0); - data.append(2); - data.append(2); - data.append(-3); - data.append(-3); - data.append(-3); data.append(-1); + data.append(-2); data.append(2); - data.append(-3); data.append(1); + data.append(-2); + data.append(2); + data.append(-1); data.append(2); data.append(-2); + data.append(-2); + data.append(-3); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/output_0.cairo b/tests/nodes/less_i32/output_0.cairo index d33bd84f2..eb28d8666 100644 --- a/tests/nodes/less_i32/output_0.cairo +++ b/tests/nodes/less_i32/output_0.cairo @@ -1,30 +1,35 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); @@ -32,10 +37,5 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(0); - 
data.append(0); - data.append(1); - data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast.cairo b/tests/nodes/less_i32_broadcast.cairo index 652384e37..b71e77968 100644 --- a/tests/nodes/less_i32_broadcast.cairo +++ b/tests/nodes/less_i32_broadcast.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i32_broadcast/input_0.cairo b/tests/nodes/less_i32_broadcast/input_0.cairo index 65654c2ba..7029ddf16 100644 --- a/tests/nodes/less_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_i32_broadcast/input_0.cairo @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-2); + data.append(2); data.append(0); - data.append(1); - data.append(0); - data.append(-3); - data.append(1); data.append(-1); data.append(1); data.append(1); - data.append(0); data.append(2); - data.append(1); + data.append(0); + data.append(0); + data.append(-1); + data.append(-3); data.append(-2); data.append(1); data.append(-2); - data.append(-2); + data.append(0); data.append(-1); data.append(-1); - data.append(-2); + data.append(0); data.append(2); + data.append(2); + data.append(0); + data.append(-1); + data.append(1); + data.append(-2); + data.append(1); data.append(-1); data.append(1); - data.append(-3); - data.append(0); - data.append(0); - data.append(2); - data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_i32_broadcast/input_1.cairo b/tests/nodes/less_i32_broadcast/input_1.cairo index 68175cdbb..426461d10 100644 --- a/tests/nodes/less_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_i32_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(1); - data.append(-3); data.append(-3); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/output_0.cairo b/tests/nodes/less_i32_broadcast/output_0.cairo index f68184301..45e698fa6 100644 --- a/tests/nodes/less_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_i32_broadcast/output_0.cairo @@ -1,21 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(0); data.append(0); data.append(0); @@ -23,19 +17,25 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(1); - data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); + data.append(1); data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8.cairo b/tests/nodes/less_i8.cairo index 5e7e38c72..7b569982f 100644 --- a/tests/nodes/less_i8.cairo +++ b/tests/nodes/less_i8.cairo @@ -3,13 +3,13 @@ mod 
input_1; mod output_0; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i8/input_0.cairo b/tests/nodes/less_i8/input_0.cairo index 2ab7aa6a9..e03f2cf94 100644 --- a/tests/nodes/less_i8/input_0.cairo +++ b/tests/nodes/less_i8/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(2); data.append(-2); data.append(1); + data.append(1); + data.append(0); data.append(-3); data.append(-1); - data.append(-3); data.append(-1); - data.append(1); - data.append(1); - data.append(0); + data.append(2); + data.append(-3); + data.append(-3); data.append(-3); - data.append(-2); data.append(2); - data.append(1); - data.append(1); - data.append(0); + data.append(-2); data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(1); data.append(-1); + data.append(2); data.append(-3); + data.append(2); data.append(-1); - data.append(1); data.append(0); + data.append(-2); + data.append(-1); + data.append(-1); + data.append(-3); + data.append(-2); + data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/input_1.cairo b/tests/nodes/less_i8/input_1.cairo index 4f243f384..64effdde2 100644 --- a/tests/nodes/less_i8/input_1.cairo +++ b/tests/nodes/less_i8/input_1.cairo @@ -10,32 +10,32 @@ fn 
input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(0); data.append(0); - data.append(-1); data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(2); - data.append(-1); data.append(0); - data.append(2); - data.append(2); + data.append(-1); + data.append(-2); + data.append(-2); + data.append(1); + data.append(-2); data.append(1); data.append(0); data.append(-2); + data.append(-3); data.append(-1); + data.append(-3); + data.append(-2); + data.append(1); data.append(1); - data.append(2); data.append(-1); - data.append(-3); data.append(-2); data.append(-1); data.append(-1); data.append(-2); - data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/output_0.cairo b/tests/nodes/less_i8/output_0.cairo index deaa47e94..4e7322ca4 100644 --- a/tests/nodes/less_i8/output_0.cairo +++ b/tests/nodes/less_i8/output_0.cairo @@ -1,41 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); data.append(0); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(0); + data.append(1); data.append(1); data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), 
data.span()) } diff --git a/tests/nodes/less_i8_broadcast.cairo b/tests/nodes/less_i8_broadcast.cairo index 11522179f..e15d4b262 100644 --- a/tests/nodes/less_i8_broadcast.cairo +++ b/tests/nodes/less_i8_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i8_broadcast/input_0.cairo b/tests/nodes/less_i8_broadcast/input_0.cairo index 1a26a2ad9..376342d8b 100644 --- a/tests/nodes/less_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_i8_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-2); - data.append(1); - data.append(-3); data.append(2); - data.append(-2); - data.append(-1); - data.append(-2); - data.append(2); - data.append(-3); data.append(-1); - data.append(-2); - data.append(-3); + data.append(1); data.append(0); - data.append(-1); data.append(1); - data.append(2); data.append(-1); + data.append(-1); + data.append(-2); data.append(2); data.append(-1); data.append(-3); - data.append(-1); - data.append(-1); - data.append(1); + data.append(-3); + data.append(2); + data.append(0); data.append(0); - data.append(1); data.append(-3); + data.append(-2); + data.append(2); + data.append(-2); + data.append(0); + data.append(1); + data.append(-1); + data.append(1); + 
data.append(2); + data.append(-2); + data.append(-2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/input_1.cairo b/tests/nodes/less_i8_broadcast/input_1.cairo index 5dd4851ef..98033a6e9 100644 --- a/tests/nodes/less_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_i8_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(0); data.append(-2); - data.append(2); + data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/output_0.cairo b/tests/nodes/less_i8_broadcast/output_0.cairo index 5c189b150..878a9be68 100644 --- a/tests/nodes/less_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_i8_broadcast/output_0.cairo @@ -1,36 +1,36 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); data.append(0); - data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(0); data.append(0); data.append(1); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); - data.append(1); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/less_u32.cairo b/tests/nodes/less_u32.cairo index 20edb7d18..29b222561 100644 --- a/tests/nodes/less_u32.cairo +++ b/tests/nodes/less_u32.cairo @@ -3,11 +3,13 @@ mod input_1; mod 
output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_u32/input_0.cairo b/tests/nodes/less_u32/input_0.cairo index 351e02b1c..b424b92ec 100644 --- a/tests/nodes/less_u32/input_0.cairo +++ b/tests/nodes/less_u32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(3); - data.append(3); - data.append(5); - data.append(0); - data.append(3); data.append(4); - data.append(3); - data.append(3); - data.append(0); - data.append(0); - data.append(3); + data.append(2); + data.append(4); + data.append(4); + data.append(1); data.append(0); data.append(1); - data.append(4); + data.append(1); data.append(1); data.append(5); + data.append(0); + data.append(0); + data.append(5); + data.append(3); data.append(3); data.append(4); data.append(5); - data.append(5); - data.append(0); - data.append(0); data.append(4); - data.append(3); - data.append(2); + data.append(5); + data.append(1); data.append(2); + data.append(5); + data.append(3); + data.append(0); + data.append(1); data.append(2); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/input_1.cairo b/tests/nodes/less_u32/input_1.cairo index 17e0c6f7b..06e6ceb5f 100644 --- a/tests/nodes/less_u32/input_1.cairo +++ b/tests/nodes/less_u32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(3); data.append(4); + data.append(2); data.append(0); - data.append(3); + 
data.append(2); + data.append(1); data.append(4); data.append(3); - data.append(5); + data.append(3); + data.append(3); + data.append(3); data.append(4); data.append(5); - data.append(3); - data.append(2); - data.append(0); - data.append(0); + data.append(5); + data.append(5); data.append(0); data.append(4); - data.append(0); data.append(1); - data.append(4); - data.append(2); - data.append(0); + data.append(5); data.append(1); - data.append(0); - data.append(2); + data.append(5); data.append(3); + data.append(5); data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(5); data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/output_0.cairo b/tests/nodes/less_u32/output_0.cairo index cde1b985b..0049de57d 100644 --- a/tests/nodes/less_u32/output_0.cairo +++ b/tests/nodes/less_u32/output_0.cairo @@ -1,26 +1,30 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); + data.append(1); data.append(1); data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -28,14 +32,10 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); - data.append(0); - data.append(0); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast.cairo 
b/tests/nodes/less_u32_broadcast.cairo index e4ea35ea3..cd2df7282 100644 --- a/tests/nodes/less_u32_broadcast.cairo +++ b/tests/nodes/less_u32_broadcast.cairo @@ -3,11 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_u32_broadcast/input_0.cairo b/tests/nodes/less_u32_broadcast/input_0.cairo index d16f00b49..df8b0c90c 100644 --- a/tests/nodes/less_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_u32_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(4); - data.append(5); - data.append(4); data.append(1); - data.append(3); - data.append(0); - data.append(5); + data.append(4); data.append(5); data.append(3); data.append(4); - data.append(2); - data.append(2); - data.append(5); data.append(0); + data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(5); + data.append(4); + data.append(3); + data.append(3); data.append(0); + data.append(3); data.append(0); + data.append(3); + data.append(5); data.append(1); - data.append(1); - data.append(1); + data.append(4); + data.append(5); data.append(2); data.append(1); - data.append(0); data.append(3); - data.append(4); + data.append(2); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/input_1.cairo b/tests/nodes/less_u32_broadcast/input_1.cairo index 0f4dcd907..790699956 100644 --- a/tests/nodes/less_u32_broadcast/input_1.cairo +++ 
b/tests/nodes/less_u32_broadcast/input_1.cairo @@ -10,7 +10,7 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(1); + data.append(2); data.append(5); data.append(3); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_u32_broadcast/output_0.cairo b/tests/nodes/less_u32_broadcast/output_0.cairo index f22ac65b4..396cb18f5 100644 --- a/tests/nodes/less_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_u32_broadcast/output_0.cairo @@ -1,41 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); - data.append(0); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16.cairo b/tests/nodes/or_fp16x16.cairo index 6bc72e320..bd23ac7c4 100644 --- a/tests/nodes/or_fp16x16.cairo +++ b/tests/nodes/or_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_or_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_fp16x16/input_0.cairo b/tests/nodes/or_fp16x16/input_0.cairo index 76289ddcc..d834a7f97 100644 --- a/tests/nodes/or_fp16x16/input_0.cairo +++ b/tests/nodes/or_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - 
data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16/input_1.cairo b/tests/nodes/or_fp16x16/input_1.cairo index 16f8a9735..397241eee 100644 --- a/tests/nodes/or_fp16x16/input_1.cairo +++ b/tests/nodes/or_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 
{ mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: 
true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16/output_0.cairo b/tests/nodes/or_fp16x16/output_0.cairo index 07754b1eb..77eb5ca09 100644 --- a/tests/nodes/or_fp16x16/output_0.cairo +++ b/tests/nodes/or_fp16x16/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -17,7 +18,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_fp16x16_broadcast.cairo b/tests/nodes/or_fp16x16_broadcast.cairo index 008614487..2a1a6d05a 100644 --- a/tests/nodes/or_fp16x16_broadcast.cairo +++ b/tests/nodes/or_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_or_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = 
output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_fp16x16_broadcast/input_0.cairo b/tests/nodes/or_fp16x16_broadcast/input_0.cairo index cea1413f2..97aba6ffa 100644 --- a/tests/nodes/or_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/or_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16_broadcast/input_1.cairo b/tests/nodes/or_fp16x16_broadcast/input_1.cairo index 3d0d0a4bd..7e68b68a9 100644 --- a/tests/nodes/or_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/or_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/or_fp16x16_broadcast/output_0.cairo b/tests/nodes/or_fp16x16_broadcast/output_0.cairo index 2041bdce3..0367c57b6 100644 --- a/tests/nodes/or_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/or_fp16x16_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_fp8x23.cairo b/tests/nodes/or_fp8x23.cairo index fbd0d02c3..a946bc909 100644 --- a/tests/nodes/or_fp8x23.cairo +++ b/tests/nodes/or_fp8x23.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_fp8x23/input_0.cairo b/tests/nodes/or_fp8x23/input_0.cairo index e5c3797ea..eac60eec6 100644 --- a/tests/nodes/or_fp8x23/input_0.cairo +++ 
b/tests/nodes/or_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); 
- data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23/input_1.cairo b/tests/nodes/or_fp8x23/input_1.cairo index 225d49dcd..7395c8efc 100644 --- a/tests/nodes/or_fp8x23/input_1.cairo +++ b/tests/nodes/or_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -11,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 
8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23/output_0.cairo b/tests/nodes/or_fp8x23/output_0.cairo index 47d67b111..e391d000f 100644 --- a/tests/nodes/or_fp8x23/output_0.cairo +++ b/tests/nodes/or_fp8x23/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -24,7 +25,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_fp8x23_broadcast.cairo 
b/tests/nodes/or_fp8x23_broadcast.cairo index a286666c2..0cd4ea567 100644 --- a/tests/nodes/or_fp8x23_broadcast.cairo +++ b/tests/nodes/or_fp8x23_broadcast.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_fp8x23_broadcast/input_0.cairo b/tests/nodes/or_fp8x23_broadcast/input_0.cairo index 5c6660a18..247e51688 100644 --- a/tests/nodes/or_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/or_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); 
data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23_broadcast/input_1.cairo b/tests/nodes/or_fp8x23_broadcast/input_1.cairo index adadfb8cb..54e53e26a 100644 --- a/tests/nodes/or_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/or_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23_broadcast/output_0.cairo b/tests/nodes/or_fp8x23_broadcast/output_0.cairo index 2041bdce3..0367c57b6 100644 --- a/tests/nodes/or_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/or_fp8x23_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_i32.cairo b/tests/nodes/or_i32.cairo index e2aa6a99d..ce7f05023 100644 --- a/tests/nodes/or_i32.cairo +++ b/tests/nodes/or_i32.cairo @@ -3,12 +3,10 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +14,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_i32/input_0.cairo b/tests/nodes/or_i32/input_0.cairo index 45a1a532b..4f63cdac0 100644 --- a/tests/nodes/or_i32/input_0.cairo +++ b/tests/nodes/or_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); data.append(0); + data.append(-3); + data.append(2); data.append(-2); data.append(1); + data.append(0); data.append(-3); + data.append(0); + data.append(-1); data.append(1); data.append(1); + data.append(-1); data.append(-3); data.append(1); - data.append(1); - data.append(0); + data.append(-3); data.append(-2); data.append(-3); - data.append(1); - data.append(0); - data.append(-1); - data.append(-1); data.append(-3); + data.append(2); data.append(-2); - data.append(-1); - data.append(-1); data.append(-2); data.append(1); + data.append(-2); + data.append(0); 
data.append(2); - data.append(-1); - data.append(-1); - data.append(-3); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32/input_1.cairo b/tests/nodes/or_i32/input_1.cairo index 016db0a53..dc6bfe1ef 100644 --- a/tests/nodes/or_i32/input_1.cairo +++ b/tests/nodes/or_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(0); - data.append(-1); data.append(-2); + data.append(-3); data.append(1); - data.append(0); + data.append(2); + data.append(-2); + data.append(-2); data.append(-1); data.append(1); - data.append(-3); + data.append(2); data.append(-2); data.append(-3); - data.append(-1); - data.append(-3); + data.append(0); + data.append(1); + data.append(-2); data.append(-3); data.append(2); - data.append(-1); - data.append(0); + data.append(-3); + data.append(1); data.append(0); data.append(-2); - data.append(-2); + data.append(0); + data.append(-3); data.append(2); - data.append(-2); - data.append(-2); data.append(-1); - data.append(-3); - data.append(-3); + data.append(1); + data.append(2); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32/output_0.cairo b/tests/nodes/or_i32/output_0.cairo index 95194b767..77eb5ca09 100644 --- a/tests/nodes/or_i32/output_0.cairo +++ b/tests/nodes/or_i32/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -10,7 +11,7 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_i32_broadcast.cairo b/tests/nodes/or_i32_broadcast.cairo index a8a5caee8..f2265d497 100644 --- a/tests/nodes/or_i32_broadcast.cairo +++ b/tests/nodes/or_i32_broadcast.cairo @@ -3,12 +3,10 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +14,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_i32_broadcast/input_0.cairo b/tests/nodes/or_i32_broadcast/input_0.cairo index eded6f98f..224a20b1e 100644 --- a/tests/nodes/or_i32_broadcast/input_0.cairo +++ b/tests/nodes/or_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_0() -> Tensor { let mut 
data = ArrayTrait::new(); data.append(1); - data.append(1); + data.append(-3); data.append(2); - data.append(-1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32_broadcast/input_1.cairo b/tests/nodes/or_i32_broadcast/input_1.cairo index 09e58e0d8..8b33cf367 100644 --- a/tests/nodes/or_i32_broadcast/input_1.cairo +++ b/tests/nodes/or_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-3); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32_broadcast/output_0.cairo b/tests/nodes/or_i32_broadcast/output_0.cairo index 2041bdce3..0367c57b6 100644 --- a/tests/nodes/or_i32_broadcast/output_0.cairo +++ b/tests/nodes/or_i32_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_i8.cairo b/tests/nodes/or_i8.cairo index b97720435..bc8e26c0e 100644 --- a/tests/nodes/or_i8.cairo +++ b/tests/nodes/or_i8.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use 
orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorMul}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_i8/input_0.cairo b/tests/nodes/or_i8/input_0.cairo index 2e3cfaefb..28d5be328 100644 --- a/tests/nodes/or_i8/input_0.cairo +++ b/tests/nodes/or_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorMul}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-2); + data.append(-3); + data.append(-3); data.append(0); - data.append(-1); - data.append(1); - data.append(2); - data.append(1); data.append(2); - data.append(-1); - data.append(2); - data.append(1); - data.append(1); + data.append(-2); + data.append(0); data.append(0); data.append(2); - data.append(-2); data.append(1); - data.append(0); data.append(1); - data.append(0); data.append(2); + data.append(-1); data.append(0); - data.append(2); data.append(0); - data.append(2); + data.append(-2); + data.append(-3); data.append(1); - data.append(2); + data.append(-2); + data.append(0); + data.append(-1); data.append(1); - data.append(-3); + data.append(-2); + data.append(-2); data.append(1); + 
data.append(-1); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8/input_1.cairo b/tests/nodes/or_i8/input_1.cairo index fa6f014b9..4810b255f 100644 --- a/tests/nodes/or_i8/input_1.cairo +++ b/tests/nodes/or_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorMul}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); data.append(-2); - data.append(-1); - data.append(-1); data.append(1); data.append(0); - data.append(1); - data.append(-3); - data.append(1); - data.append(-1); - data.append(0); data.append(-1); data.append(0); data.append(-3); - data.append(2); data.append(-2); + data.append(1); + data.append(-1); + data.append(1); + data.append(-1); data.append(-2); - data.append(2); - data.append(2); - data.append(2); + data.append(1); data.append(2); data.append(-1); - data.append(2); + data.append(-1); + data.append(-3); data.append(-3); + data.append(1); + data.append(-2); + data.append(1); data.append(-1); data.append(-1); data.append(-3); + data.append(2); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8/output_0.cairo b/tests/nodes/or_i8/output_0.cairo index 8e18a844b..77eb5ca09 100644 --- a/tests/nodes/or_i8/output_0.cairo +++ b/tests/nodes/or_i8/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); 
shape.append(3); shape.append(3); @@ -19,7 +20,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_i8_broadcast.cairo b/tests/nodes/or_i8_broadcast.cairo index a2d044fc7..4abc3dda7 100644 --- a/tests/nodes/or_i8_broadcast.cairo +++ b/tests/nodes/or_i8_broadcast.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorMul}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_i8_broadcast/input_0.cairo b/tests/nodes/or_i8_broadcast/input_0.cairo index 72331f26f..2f89a2efc 100644 --- a/tests/nodes/or_i8_broadcast/input_0.cairo +++ b/tests/nodes/or_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorMul}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = 
ArrayTrait::new(); - data.append(-1); - data.append(-2); data.append(2); - data.append(-1); + data.append(2); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8_broadcast/input_1.cairo b/tests/nodes/or_i8_broadcast/input_1.cairo index d45d6dc9e..0c38cf889 100644 --- a/tests/nodes/or_i8_broadcast/input_1.cairo +++ b/tests/nodes/or_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorMul}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); data.append(-1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8_broadcast/output_0.cairo b/tests/nodes/or_i8_broadcast/output_0.cairo index 2041bdce3..0367c57b6 100644 --- a/tests/nodes/or_i8_broadcast/output_0.cairo +++ b/tests/nodes/or_i8_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_u32.cairo b/tests/nodes/or_u32.cairo index 9d3007f49..9cff3f789 100644 --- a/tests/nodes/or_u32.cairo +++ b/tests/nodes/or_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use 
core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_or_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_u32/input_0.cairo b/tests/nodes/or_u32/input_0.cairo index 255e0877e..2b85e47b8 100644 --- a/tests/nodes/or_u32/input_0.cairo +++ b/tests/nodes/or_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(5); + data.append(4); + data.append(4); data.append(5); data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); data.append(5); - data.append(4); - data.append(0); data.append(5); data.append(4); - data.append(2); data.append(3); data.append(1); data.append(0); + data.append(2); + data.append(3); data.append(1); + data.append(3); + data.append(4); data.append(5); - data.append(0); data.append(5); - data.append(2); data.append(3); data.append(0); data.append(0); + data.append(2); data.append(5); + data.append(4); + data.append(1); data.append(5); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32/input_1.cairo b/tests/nodes/or_u32/input_1.cairo index 53e2c0f87..77b154db3 
100644 --- a/tests/nodes/or_u32/input_1.cairo +++ b/tests/nodes/or_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(4); + data.append(1); data.append(1); data.append(5); - data.append(3); - data.append(3); - data.append(0); - data.append(0); - data.append(4); data.append(4); + data.append(0); + data.append(5); data.append(1); data.append(1); - data.append(1); - data.append(5); - data.append(3); - data.append(2); - data.append(2); data.append(5); data.append(4); - data.append(3); + data.append(0); data.append(4); + data.append(5); + data.append(0); + data.append(3); data.append(2); data.append(4); + data.append(3); data.append(5); + data.append(2); + data.append(0); data.append(4); + data.append(0); + data.append(0); + data.append(2); data.append(2); - data.append(1); data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32/output_0.cairo b/tests/nodes/or_u32/output_0.cairo index 70e2b10b7..221f32074 100644 --- a/tests/nodes/or_u32/output_0.cairo +++ b/tests/nodes/or_u32/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,14 +14,13 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); - data.append(0); data.append(0); data.append(1); 
data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); @@ -30,6 +30,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_u32_broadcast.cairo b/tests/nodes/or_u32_broadcast.cairo index aa697b02d..13094f778 100644 --- a/tests/nodes/or_u32_broadcast.cairo +++ b/tests/nodes/or_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_or_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.or(@input_1); + let y_0 = input_0.or(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/or_u32_broadcast/input_0.cairo b/tests/nodes/or_u32_broadcast/input_0.cairo index 888c3fd7b..dd8ecf366 100644 --- a/tests/nodes/or_u32_broadcast/input_0.cairo +++ b/tests/nodes/or_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = 
ArrayTrait::new(); - data.append(3); + data.append(5); + data.append(1); + data.append(0); data.append(2); - data.append(2); - data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32_broadcast/input_1.cairo b/tests/nodes/or_u32_broadcast/input_1.cairo index acdc8015e..7ee1bbea4 100644 --- a/tests/nodes/or_u32_broadcast/input_1.cairo +++ b/tests/nodes/or_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(5); + data.append(3); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32_broadcast/output_0.cairo b/tests/nodes/or_u32_broadcast/output_0.cairo index 2041bdce3..0367c57b6 100644 --- a/tests/nodes/or_u32_broadcast/output_0.cairo +++ b/tests/nodes/or_u32_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/xor_fp16x16.cairo b/tests/nodes/xor_fp16x16.cairo index 8a54526f7..6f5c67bda 100644 --- a/tests/nodes/xor_fp16x16.cairo +++ b/tests/nodes/xor_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::FP16x16TensorPartialEq; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_xor_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_fp16x16/input_0.cairo b/tests/nodes/xor_fp16x16/input_0.cairo index 147abd897..a895df696 100644 --- a/tests/nodes/xor_fp16x16/input_0.cairo +++ b/tests/nodes/xor_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16/input_1.cairo b/tests/nodes/xor_fp16x16/input_1.cairo index b87dfda30..1d2d646c2 100644 --- a/tests/nodes/xor_fp16x16/input_1.cairo +++ 
b/tests/nodes/xor_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16/output_0.cairo b/tests/nodes/xor_fp16x16/output_0.cairo index 36a93ca20..0e89fbf7a 100644 --- a/tests/nodes/xor_fp16x16/output_0.cairo +++ b/tests/nodes/xor_fp16x16/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(1); data.append(0); - data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16_broadcast.cairo b/tests/nodes/xor_fp16x16_broadcast.cairo index a8d3f37e0..3e3660dee 100644 --- a/tests/nodes/xor_fp16x16_broadcast.cairo +++ b/tests/nodes/xor_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_xor_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_fp16x16_broadcast/input_0.cairo b/tests/nodes/xor_fp16x16_broadcast/input_0.cairo index f96f54173..e3279c555 100644 --- a/tests/nodes/xor_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/xor_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16_broadcast/input_1.cairo b/tests/nodes/xor_fp16x16_broadcast/input_1.cairo index edf1408cd..59a06f83b 100644 --- a/tests/nodes/xor_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/xor_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16_broadcast/output_0.cairo b/tests/nodes/xor_fp16x16_broadcast/output_0.cairo index 069598d33..897d076d9 100644 --- a/tests/nodes/xor_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/xor_fp16x16_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23.cairo b/tests/nodes/xor_fp8x23.cairo index 7d89727de..577e1d365 100644 --- a/tests/nodes/xor_fp8x23.cairo +++ b/tests/nodes/xor_fp8x23.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_fp8x23/input_0.cairo b/tests/nodes/xor_fp8x23/input_0.cairo index 099b5378c..aa4155752 100644 --- a/tests/nodes/xor_fp8x23/input_0.cairo +++ b/tests/nodes/xor_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, 
sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23/input_1.cairo b/tests/nodes/xor_fp8x23/input_1.cairo index b5883e501..d67ef0695 100644 --- a/tests/nodes/xor_fp8x23/input_1.cairo +++ b/tests/nodes/xor_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { 
shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + 
data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23/output_0.cairo b/tests/nodes/xor_fp8x23/output_0.cairo index 3a99f04f6..451f25ccc 100644 --- a/tests/nodes/xor_fp8x23/output_0.cairo +++ b/tests/nodes/xor_fp8x23/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -15,13 +16,13 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); - data.append(1); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); @@ -29,12 +30,12 @@ fn output_0() -> Tensor { data.append(1); data.append(0); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23_broadcast.cairo b/tests/nodes/xor_fp8x23_broadcast.cairo index 30a57eb23..aeb36be85 100644 --- a/tests/nodes/xor_fp8x23_broadcast.cairo +++ b/tests/nodes/xor_fp8x23_broadcast.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, 
I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_fp8x23_broadcast/input_0.cairo b/tests/nodes/xor_fp8x23_broadcast/input_0.cairo index 68f8cc262..11337c926 100644 --- a/tests/nodes/xor_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/xor_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23_broadcast/input_1.cairo b/tests/nodes/xor_fp8x23_broadcast/input_1.cairo index edba895b1..8f5154796 100644 --- a/tests/nodes/xor_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/xor_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ 
fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23_broadcast/output_0.cairo b/tests/nodes/xor_fp8x23_broadcast/output_0.cairo index 83a3b1e6a..0e89fbf7a 100644 --- a/tests/nodes/xor_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/xor_fp8x23_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); - data.append(1); + data.append(0); + data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32.cairo b/tests/nodes/xor_i32.cairo index d1b6a7705..b6900fc73 100644 --- a/tests/nodes/xor_i32.cairo +++ b/tests/nodes/xor_i32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; #[test] #[available_gas(2000000000)] fn test_xor_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - 
let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_i32/input_0.cairo b/tests/nodes/xor_i32/input_0.cairo index 6bffe1752..6b8c58eda 100644 --- a/tests/nodes/xor_i32/input_0.cairo +++ b/tests/nodes/xor_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(-1); + data.append(-2); + data.append(-1); data.append(2); + data.append(-3); + data.append(-2); + data.append(-1); + data.append(-3); + data.append(-1); data.append(1); data.append(1); + data.append(-1); data.append(0); - data.append(2); - data.append(2); + data.append(-1); data.append(2); data.append(2); data.append(-1); data.append(2); - data.append(-1); + data.append(-2); data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(-3); - data.append(-3); - data.append(-3); - data.append(-1); - data.append(-3); data.append(-1); data.append(-2); - data.append(2); - data.append(1); + data.append(-1); + data.append(-1); data.append(0); data.append(0); - data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32/input_1.cairo b/tests/nodes/xor_i32/input_1.cairo index b857282fd..77387de26 100644 --- a/tests/nodes/xor_i32/input_1.cairo +++ b/tests/nodes/xor_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; 
+use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(-3); + data.append(1); data.append(-2); data.append(0); - data.append(2); - data.append(2); - data.append(2); data.append(-3); - data.append(0); - data.append(0); - data.append(-1); data.append(1); data.append(-2); data.append(1); + data.append(1); data.append(-2); + data.append(-1); data.append(0); - data.append(0); - data.append(-3); - data.append(1); - data.append(1); + data.append(2); data.append(1); data.append(-2); + data.append(2); + data.append(2); + data.append(0); + data.append(0); data.append(1); - data.append(-2); data.append(1); + data.append(0); + data.append(2); data.append(-3); - data.append(-2); + data.append(2); + data.append(2); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32/output_0.cairo b/tests/nodes/xor_i32/output_0.cairo index 323d9bf01..828a00d81 100644 --- a/tests/nodes/xor_i32/output_0.cairo +++ b/tests/nodes/xor_i32/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,18 +12,19 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(0); data.append(0); data.append(1); data.append(1); @@ -34,7 +36,6 @@ fn output_0() -> 
Tensor { data.append(0); data.append(0); data.append(1); - data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32_broadcast.cairo b/tests/nodes/xor_i32_broadcast.cairo index 6a118bfb4..b4b9d9d87 100644 --- a/tests/nodes/xor_i32_broadcast.cairo +++ b/tests/nodes/xor_i32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; #[test] #[available_gas(2000000000)] fn test_xor_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_i32_broadcast/input_0.cairo b/tests/nodes/xor_i32_broadcast/input_0.cairo index af954e380..fe474ce81 100644 --- a/tests/nodes/xor_i32_broadcast/input_0.cairo +++ b/tests/nodes/xor_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); + data.append(2); + data.append(1); + data.append(2); data.append(0); - data.append(-3); - data.append(-1); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32_broadcast/input_1.cairo b/tests/nodes/xor_i32_broadcast/input_1.cairo index e49c73fb5..cdf0184f0 100644 --- a/tests/nodes/xor_i32_broadcast/input_1.cairo +++ b/tests/nodes/xor_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(-3); + data.append(0); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32_broadcast/output_0.cairo b/tests/nodes/xor_i32_broadcast/output_0.cairo index 94c384136..085034f13 100644 --- a/tests/nodes/xor_i32_broadcast/output_0.cairo +++ b/tests/nodes/xor_i32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(0); - data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8.cairo b/tests/nodes/xor_i8.cairo index 0ba2c0257..8437bbf13 100644 --- a/tests/nodes/xor_i8.cairo +++ b/tests/nodes/xor_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; 
+use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_xor_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_i8/input_0.cairo b/tests/nodes/xor_i8/input_0.cairo index 13e2952b5..8eb98bdfe 100644 --- a/tests/nodes/xor_i8/input_0.cairo +++ b/tests/nodes/xor_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(2); - data.append(-3); - data.append(-1); data.append(2); data.append(1); - data.append(-3); + data.append(-1); + data.append(0); data.append(2); data.append(-3); - data.append(-1); - data.append(-2); - data.append(-1); data.append(0); data.append(-1); data.append(1); data.append(-3); - data.append(-1); data.append(-2); data.append(-3); - data.append(-2); - data.append(-1); data.append(-3); - data.append(-2); + data.append(2); + data.append(1); data.append(0); data.append(2); + data.append(0); + data.append(-2); + data.append(-2); + data.append(0); + data.append(-2); data.append(-3); + 
data.append(2); data.append(1); + data.append(0); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8/input_1.cairo b/tests/nodes/xor_i8/input_1.cairo index 700afe506..ade07f6d4 100644 --- a/tests/nodes/xor_i8/input_1.cairo +++ b/tests/nodes/xor_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,30 +12,30 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); data.append(-1); + data.append(-2); + data.append(-3); + data.append(-2); + data.append(2); + data.append(-3); data.append(1); + data.append(-1); data.append(1); - data.append(2); - data.append(2); + data.append(-1); data.append(1); - data.append(-3); data.append(1); - data.append(-3); - data.append(2); + data.append(0); + data.append(0); data.append(-2); data.append(-3); - data.append(-2); - data.append(2); - data.append(-2); - data.append(2); - data.append(2); + data.append(1); + data.append(1); + data.append(-1); + data.append(0); + data.append(-1); data.append(0); data.append(-2); data.append(2); data.append(-3); - data.append(-2); - data.append(-2); - data.append(1); - data.append(-3); - data.append(-2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8/output_0.cairo b/tests/nodes/xor_i8/output_0.cairo index 8affaf526..f4cc18d31 100644 --- a/tests/nodes/xor_i8/output_0.cairo +++ b/tests/nodes/xor_i8/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn 
output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -12,9 +13,10 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); @@ -22,10 +24,8 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(1); data.append(0); @@ -35,6 +35,7 @@ fn output_0() -> Tensor { data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8_broadcast.cairo b/tests/nodes/xor_i8_broadcast.cairo index 98e31be33..1d7ed2330 100644 --- a/tests/nodes/xor_i8_broadcast.cairo +++ b/tests/nodes/xor_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::operators::tensor::I8TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_xor_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_i8_broadcast/input_0.cairo 
b/tests/nodes/xor_i8_broadcast/input_0.cairo index 402d98a0a..7aadf0945 100644 --- a/tests/nodes/xor_i8_broadcast/input_0.cairo +++ b/tests/nodes/xor_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(2); + data.append(-3); + data.append(-2); + data.append(-3); data.append(1); - data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8_broadcast/input_1.cairo b/tests/nodes/xor_i8_broadcast/input_1.cairo index c5b62636c..9c02b355b 100644 --- a/tests/nodes/xor_i8_broadcast/input_1.cairo +++ b/tests/nodes/xor_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-3); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8_broadcast/output_0.cairo b/tests/nodes/xor_i8_broadcast/output_0.cairo index 01569b35a..5614176ce 100644 --- a/tests/nodes/xor_i8_broadcast/output_0.cairo +++ b/tests/nodes/xor_i8_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); data.append(0); + data.append(1); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32.cairo b/tests/nodes/xor_u32.cairo index 739d60af4..0b02997b7 100644 --- a/tests/nodes/xor_u32.cairo +++ b/tests/nodes/xor_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_u32/input_0.cairo b/tests/nodes/xor_u32/input_0.cairo index ce0703451..92bab79ef 100644 --- a/tests/nodes/xor_u32/input_0.cairo +++ b/tests/nodes/xor_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - 
data.append(1); + data.append(4); data.append(5); - data.append(3); data.append(1); - data.append(2); - data.append(2); data.append(0); data.append(0); - data.append(5); - data.append(2); - data.append(1); + data.append(3); + data.append(0); data.append(2); - data.append(1); - data.append(1); + data.append(3); + data.append(3); + data.append(3); data.append(5); data.append(2); + data.append(2); data.append(0); + data.append(4); + data.append(3); data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(1); data.append(4); data.append(1); - data.append(5); + data.append(4); + data.append(3); + data.append(2); + data.append(2); + data.append(3); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32/input_1.cairo b/tests/nodes/xor_u32/input_1.cairo index 1e770e8dd..7d0abdeab 100644 --- a/tests/nodes/xor_u32/input_1.cairo +++ b/tests/nodes/xor_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(4); - data.append(4); - data.append(2); data.append(5); + data.append(3); + data.append(4); + data.append(1); data.append(0); - data.append(5); - data.append(2); data.append(3); + data.append(1); + data.append(4); + data.append(1); data.append(0); data.append(5); - data.append(0); + data.append(5); data.append(2); - data.append(0); + data.append(3); data.append(2); data.append(1); - data.append(5); - data.append(1); - data.append(5); data.append(4); - data.append(4); - data.append(0); data.append(3); + data.append(3); + data.append(2); + data.append(2); data.append(1); - data.append(1); + data.append(3); + 
data.append(5); data.append(0); - data.append(1); + data.append(2); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32/output_0.cairo b/tests/nodes/xor_u32/output_0.cairo index e32c287bf..94e773bce 100644 --- a/tests/nodes/xor_u32/output_0.cairo +++ b/tests/nodes/xor_u32/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,12 +14,9 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); data.append(0); data.append(1); + data.append(0); data.append(1); data.append(0); data.append(1); @@ -28,11 +26,14 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); diff --git a/tests/nodes/xor_u32_broadcast.cairo b/tests/nodes/xor_u32_broadcast.cairo index 9c5abf64f..0fa6a593c 100644 --- a/tests/nodes/xor_u32_broadcast.cairo +++ b/tests/nodes/xor_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use 
core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.xor(@input_1); + let y_0 = input_0.xor(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/xor_u32_broadcast/input_0.cairo b/tests/nodes/xor_u32_broadcast/input_0.cairo index 48012ef07..58317b7f3 100644 --- a/tests/nodes/xor_u32_broadcast/input_0.cairo +++ b/tests/nodes/xor_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(4); + data.append(4); data.append(2); - data.append(0); - data.append(1); - data.append(1); + data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32_broadcast/input_1.cairo b/tests/nodes/xor_u32_broadcast/input_1.cairo index d6a15bdd7..57bf57831 100644 --- a/tests/nodes/xor_u32_broadcast/input_1.cairo +++ b/tests/nodes/xor_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(3); - data.append(0); + data.append(4); + data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/xor_u32_broadcast/output_0.cairo b/tests/nodes/xor_u32_broadcast/output_0.cairo index 069598d33..0e89fbf7a 100644 --- a/tests/nodes/xor_u32_broadcast/output_0.cairo +++ b/tests/nodes/xor_u32_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); @@ -11,6 +12,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } From 976563f3abd61dd717d00a0b90afc6a70e7bbb3b Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 10:05:30 +0100 Subject: [PATCH 30/68] update doc --- docs/framework/operators/tensor/tensor.and.md | 6 +++--- docs/framework/operators/tensor/tensor.equal.md | 8 ++++---- docs/framework/operators/tensor/tensor.greater.md | 8 ++++---- docs/framework/operators/tensor/tensor.greater_equal.md | 6 +++--- docs/framework/operators/tensor/tensor.less.md | 6 +++--- docs/framework/operators/tensor/tensor.less_equal.md | 6 +++--- docs/framework/operators/tensor/tensor.not.md | 4 ++-- docs/framework/operators/tensor/tensor.or.md | 8 ++++---- docs/framework/operators/tensor/tensor.xor.md | 8 ++++---- 9 files changed, 30 insertions(+), 30 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.and.md b/docs/framework/operators/tensor/tensor.and.md index 58b13651b..20b4e8979 100644 --- a/docs/framework/operators/tensor/tensor.and.md +++ b/docs/framework/operators/tensor/tensor.and.md @@ -1,7 +1,7 @@ #tensor.and ```rust - fn and(self: @Tensor, other: @Tensor) -> Tensor; + fn and(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical AND of two tensors element-wise. 
@@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` with the same shape as the broadcasted inputs. +A new `Tensor` with the same shape as the broadcasted inputs. ## Examples @@ -29,7 +29,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; -fn and_example() -> Tensor { +fn and_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(), ); diff --git a/docs/framework/operators/tensor/tensor.equal.md b/docs/framework/operators/tensor/tensor.equal.md index c5157498a..91599531c 100644 --- a/docs/framework/operators/tensor/tensor.equal.md +++ b/docs/framework/operators/tensor/tensor.equal.md @@ -1,7 +1,7 @@ #tensor.equal ```rust - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if two tensors are equal element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. +A new `Tensor` (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn eq_example() -> Tensor { +fn eq_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn eq_example() -> Tensor { +fn eq_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.greater.md b/docs/framework/operators/tensor/tensor.greater.md index fb186b8f7..5704dc351 100644 --- a/docs/framework/operators/tensor/tensor.greater.md +++ b/docs/framework/operators/tensor/tensor.greater.md @@ -1,7 +1,7 @@ #tensor.greater ```rust - fn greater(self: @Tensor, other: @Tensor) -> Tensor; + fn greater(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is greater than the corresponding element of the second tensor. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_example() -> Tensor { +fn greater_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_example() -> Tensor { +fn greater_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.greater_equal.md b/docs/framework/operators/tensor/tensor.greater_equal.md index 1fecfbdc4..ed42a785c 100644 --- a/docs/framework/operators/tensor/tensor.greater_equal.md +++ b/docs/framework/operators/tensor/tensor.greater_equal.md @@ -1,7 +1,7 @@ #tensor.greater_equal ```rust - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is greater than or equal to the corresponding element of the second tensor. 
@@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_equal_example() -> Tensor { +fn greater_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_equal_example() -> Tensor { +fn greater_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md index 96586b346..b5f56b6a4 100644 --- a/docs/framework/operators/tensor/tensor.less.md +++ b/docs/framework/operators/tensor/tensor.less.md @@ -1,7 +1,7 @@ #tensor.less ```rust - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than the corresponding element of the second tensor. 
@@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.less_equal.md b/docs/framework/operators/tensor/tensor.less_equal.md index c440b39c6..68aa87c92 100644 --- a/docs/framework/operators/tensor/tensor.less_equal.md +++ b/docs/framework/operators/tensor/tensor.less_equal.md @@ -1,7 +1,7 @@ #tensor.less_equal ```rust - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor. 
@@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_equal_example() -> Tensor { +fn less_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_equal_example() -> Tensor { +fn less_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.not.md b/docs/framework/operators/tensor/tensor.not.md index ee482ec65..e99860400 100644 --- a/docs/framework/operators/tensor/tensor.not.md +++ b/docs/framework/operators/tensor/tensor.not.md @@ -1,7 +1,7 @@ #tensor.not ```rust - fn not(self: @Tensor) -> Tensor) -> Tensor; ``` Computes the negation of the elements in the bool type input tensor. @@ -23,7 +23,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; -fn not_example() -> Tensor { +fn not_example() -> Tensor { let tensor = TensorTrait::new( shape: array![3].span(), data: array![ diff --git a/docs/framework/operators/tensor/tensor.or.md b/docs/framework/operators/tensor/tensor.or.md index ff210a043..aa26792a3 100644 --- a/docs/framework/operators/tensor/tensor.or.md +++ b/docs/framework/operators/tensor/tensor.or.md @@ -1,7 +1,7 @@ #tensor.or ```rust - fn or(self: @Tensor, other: @Tensor) -> Tensor; + fn or(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical OR of two tensors element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn or_example() -> Tensor { +fn or_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -52,7 +52,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn or_example() -> Tensor { +fn or_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.xor.md b/docs/framework/operators/tensor/tensor.xor.md index 89e4c41ea..ea4477f62 100644 --- a/docs/framework/operators/tensor/tensor.xor.md +++ b/docs/framework/operators/tensor/tensor.xor.md @@ -1,7 +1,7 @@ #tensor.xor ```rust - fn xor(self: @Tensor, other: @Tensor) -> Tensor; + fn xor(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical XOR of two tensors element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn xor_example() -> Tensor { +fn xor_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -52,7 +52,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn xor_example() -> Tensor { +fn xor_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); From db3b766fd1e758af85a692c191830a10b04596e7 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 15:30:20 +0100 Subject: [PATCH 31/68] revert to fb6f4a --- .DS_Store | Bin 0 -> 6148 bytes Scarb.toml | 4 +- docs/framework/operators/tensor/README.md | 3 +- docs/framework/operators/tensor/tensor.and.md | 6 +- .../operators/tensor/tensor.equal.md | 12 +- .../operators/tensor/tensor.greater.md | 8 +- .../operators/tensor/tensor.greater_equal.md | 6 +- .../operators/tensor/tensor.is_inf.md | 4 +- .../operators/tensor/tensor.is_nan.md | 4 +- .../framework/operators/tensor/tensor.less.md | 8 +- .../operators/tensor/tensor.less_equal.md | 6 +- docs/framework/operators/tensor/tensor.not.md | 2 +- docs/framework/operators/tensor/tensor.or.md | 8 +- .../operators/tensor/tensor.reduce_sum.md | 13 +- .../tensor/tensor.reduce_sum_single_axis.md | 39 --- .../operators/tensor/tensor.reshape.md | 4 +- docs/framework/operators/tensor/tensor.xor.md | 8 +- nodegen/file_manager.py | 35 +- nodegen/node/and.py | 4 +- nodegen/node/equal.py | 20 +- nodegen/node/greater.py | 20 +- nodegen/node/greater_equal.py | 20 +- nodegen/node/is_inf.py | 138 +++++++- nodegen/node/is_nan.py | 19 +- nodegen/node/less.py | 20 +- nodegen/node/less_equal.py | 20 +- nodegen/node/not.py | 15 +- nodegen/node/or.py | 20 +- nodegen/node/reduce_sum.py | 304 +++++++++++++++--- 
nodegen/node/reduce_sum_single_axis.py | 288 ----------------- nodegen/node/reshape.py | 120 ------- nodegen/node/xor.py | 20 +- src/operators/nn/functional/col2im.cairo | 62 ++-- .../nn/functional/conv_transpose.cairo | 234 ++++++-------- .../nn/functional/depth_to_space.cairo | 25 +- src/operators/nn/functional/logsoftmax.cairo | 4 +- src/operators/nn/functional/softmax.cairo | 4 +- .../nn/functional/space_to_depth.cairo | 27 +- src/operators/tensor/core.cairo | 223 ++++--------- .../tensor/implementations/tensor_bool.cairo | 47 ++- .../implementations/tensor_complex64.cairo | 53 ++- .../implementations/tensor_fp16x16.cairo | 51 ++- .../implementations/tensor_fp16x16wide.cairo | 51 ++- .../implementations/tensor_fp32x32.cairo | 52 ++- .../implementations/tensor_fp64x64.cairo | 51 ++- .../implementations/tensor_fp8x23.cairo | 53 ++- .../implementations/tensor_fp8x23wide.cairo | 53 ++- .../tensor/implementations/tensor_i32.cairo | 48 ++- .../tensor/implementations/tensor_i8.cairo | 49 ++- .../tensor/implementations/tensor_u32.cairo | 49 ++- .../manipulation/split_to_sequence.cairo | 170 +++++----- src/operators/tensor/math.cairo | 1 - src/operators/tensor/math/and.cairo | 14 +- src/operators/tensor/math/equal.cairo | 7 +- src/operators/tensor/math/flatten.cairo | 8 +- src/operators/tensor/math/greater.cairo | 7 +- src/operators/tensor/math/greater_equal.cairo | 7 +- src/operators/tensor/math/is_inf.cairo | 40 +-- src/operators/tensor/math/is_nan.cairo | 14 +- .../tensor/math/layer_normalization.cairo | 24 +- src/operators/tensor/math/less.cairo | 7 +- src/operators/tensor/math/less_equal.cairo | 7 +- src/operators/tensor/math/or.cairo | 7 +- src/operators/tensor/math/reduce_l1.cairo | 3 +- src/operators/tensor/math/reduce_l2.cairo | 9 +- .../tensor/math/reduce_log_sum.cairo | 3 +- src/operators/tensor/math/reduce_sum.cairo | 141 +++----- .../tensor/math/reduce_sum_single_axis.cairo | 106 ------ .../tensor/math/reduce_sum_square.cairo | 3 +- 
src/operators/tensor/math/xor.cairo | 7 +- tests/nodes.cairo | 73 ++--- tests/nodes/and_bool.cairo | 12 +- tests/nodes/and_bool/input_0.cairo | 10 +- tests/nodes/and_bool/input_1.cairo | 4 +- tests/nodes/and_bool/output_0.cairo | 29 +- tests/nodes/and_bool_broadcast.cairo | 12 +- tests/nodes/and_bool_broadcast/input_0.cairo | 24 +- tests/nodes/and_bool_broadcast/input_1.cairo | 34 +- tests/nodes/and_bool_broadcast/output_0.cairo | 125 ++++--- tests/nodes/equal_fp16x16.cairo | 16 +- tests/nodes/equal_fp16x16/input_0.cairo | 32 +- tests/nodes/equal_fp16x16/input_1.cairo | 26 +- tests/nodes/equal_fp16x16/output_0.cairo | 15 +- tests/nodes/equal_fp16x16_broadcast.cairo | 16 +- .../equal_fp16x16_broadcast/input_0.cairo | 6 +- .../equal_fp16x16_broadcast/input_1.cairo | 4 +- .../equal_fp16x16_broadcast/output_0.cairo | 7 +- tests/nodes/equal_fp8x23.cairo | 16 +- tests/nodes/equal_fp8x23/input_0.cairo | 30 +- tests/nodes/equal_fp8x23/input_1.cairo | 30 +- tests/nodes/equal_fp8x23/output_0.cairo | 13 +- tests/nodes/equal_fp8x23_broadcast.cairo | 16 +- .../equal_fp8x23_broadcast/input_0.cairo | 8 +- .../equal_fp8x23_broadcast/input_1.cairo | 4 +- .../equal_fp8x23_broadcast/output_0.cairo | 7 +- tests/nodes/equal_i32.cairo | 14 +- tests/nodes/equal_i32/input_0.cairo | 27 +- tests/nodes/equal_i32/input_1.cairo | 33 +- tests/nodes/equal_i32/output_0.cairo | 11 +- tests/nodes/equal_i32_broadcast.cairo | 14 +- tests/nodes/equal_i32_broadcast/input_0.cairo | 7 +- tests/nodes/equal_i32_broadcast/input_1.cairo | 5 +- .../nodes/equal_i32_broadcast/output_0.cairo | 7 +- tests/nodes/equal_i8.cairo | 14 +- tests/nodes/equal_i8/input_0.cairo | 33 +- tests/nodes/equal_i8/input_1.cairo | 31 +- tests/nodes/equal_i8/output_0.cairo | 15 +- tests/nodes/equal_i8_broadcast.cairo | 14 +- tests/nodes/equal_i8_broadcast/input_0.cairo | 7 +- tests/nodes/equal_i8_broadcast/input_1.cairo | 7 +- tests/nodes/equal_i8_broadcast/output_0.cairo | 7 +- tests/nodes/equal_u32.cairo | 14 +- 
tests/nodes/equal_u32/input_0.cairo | 31 +- tests/nodes/equal_u32/input_1.cairo | 29 +- tests/nodes/equal_u32/output_0.cairo | 19 +- tests/nodes/equal_u32_broadcast.cairo | 14 +- tests/nodes/equal_u32_broadcast/input_0.cairo | 9 +- tests/nodes/equal_u32_broadcast/input_1.cairo | 5 +- .../nodes/equal_u32_broadcast/output_0.cairo | 5 +- tests/nodes/greater_equal_fp16x16.cairo | 16 +- .../nodes/greater_equal_fp16x16/input_0.cairo | 28 +- .../nodes/greater_equal_fp16x16/input_1.cairo | 28 +- .../greater_equal_fp16x16/output_0.cairo | 17 +- .../greater_equal_fp16x16_broadcast.cairo | 16 +- .../input_0.cairo | 32 +- .../input_1.cairo | 6 +- .../output_0.cairo | 23 +- tests/nodes/greater_equal_fp8x23.cairo | 14 +- .../nodes/greater_equal_fp8x23/input_0.cairo | 30 +- .../nodes/greater_equal_fp8x23/input_1.cairo | 24 +- .../nodes/greater_equal_fp8x23/output_0.cairo | 15 +- .../greater_equal_fp8x23_broadcast.cairo | 14 +- .../input_0.cairo | 22 +- .../input_1.cairo | 6 +- .../output_0.cairo | 15 +- tests/nodes/greater_equal_i32.cairo | 14 +- tests/nodes/greater_equal_i32/input_0.cairo | 31 +- tests/nodes/greater_equal_i32/input_1.cairo | 31 +- tests/nodes/greater_equal_i32/output_0.cairo | 19 +- tests/nodes/greater_equal_i32_broadcast.cairo | 14 +- .../greater_equal_i32_broadcast/input_0.cairo | 31 +- .../greater_equal_i32_broadcast/input_1.cairo | 9 +- .../output_0.cairo | 21 +- tests/nodes/greater_equal_i8.cairo | 16 +- tests/nodes/greater_equal_i8/input_0.cairo | 27 +- tests/nodes/greater_equal_i8/input_1.cairo | 27 +- tests/nodes/greater_equal_i8/output_0.cairo | 19 +- tests/nodes/greater_equal_i8_broadcast.cairo | 16 +- .../greater_equal_i8_broadcast/input_0.cairo | 27 +- .../greater_equal_i8_broadcast/input_1.cairo | 7 +- .../greater_equal_i8_broadcast/output_0.cairo | 23 +- tests/nodes/greater_equal_u32.cairo | 14 +- tests/nodes/greater_equal_u32/input_0.cairo | 29 +- tests/nodes/greater_equal_u32/input_1.cairo | 35 +- tests/nodes/greater_equal_u32/output_0.cairo | 19 
+- tests/nodes/greater_equal_u32_broadcast.cairo | 14 +- .../greater_equal_u32_broadcast/input_0.cairo | 27 +- .../greater_equal_u32_broadcast/input_1.cairo | 7 +- .../output_0.cairo | 15 +- tests/nodes/greater_fp16x16.cairo | 16 +- tests/nodes/greater_fp16x16/input_0.cairo | 26 +- tests/nodes/greater_fp16x16/input_1.cairo | 30 +- tests/nodes/greater_fp16x16/output_0.cairo | 13 +- tests/nodes/greater_fp16x16_broadcast.cairo | 16 +- .../greater_fp16x16_broadcast/input_0.cairo | 8 +- .../greater_fp16x16_broadcast/input_1.cairo | 6 +- .../greater_fp16x16_broadcast/output_0.cairo | 9 +- tests/nodes/greater_fp8x23.cairo | 14 +- tests/nodes/greater_fp8x23/input_0.cairo | 34 +- tests/nodes/greater_fp8x23/input_1.cairo | 32 +- tests/nodes/greater_fp8x23/output_0.cairo | 19 +- tests/nodes/greater_fp8x23_broadcast.cairo | 14 +- .../greater_fp8x23_broadcast/input_0.cairo | 6 +- .../greater_fp8x23_broadcast/input_1.cairo | 6 +- .../greater_fp8x23_broadcast/output_0.cairo | 7 +- tests/nodes/greater_i32.cairo | 14 +- tests/nodes/greater_i32/input_0.cairo | 31 +- tests/nodes/greater_i32/input_1.cairo | 35 +- tests/nodes/greater_i32/output_0.cairo | 19 +- tests/nodes/greater_i32_broadcast.cairo | 14 +- .../nodes/greater_i32_broadcast/input_0.cairo | 9 +- .../nodes/greater_i32_broadcast/input_1.cairo | 7 +- .../greater_i32_broadcast/output_0.cairo | 7 +- tests/nodes/greater_i8.cairo | 16 +- tests/nodes/greater_i8/input_0.cairo | 31 +- tests/nodes/greater_i8/input_1.cairo | 33 +- tests/nodes/greater_i8/output_0.cairo | 21 +- tests/nodes/greater_i8_broadcast.cairo | 16 +- .../nodes/greater_i8_broadcast/input_0.cairo | 7 +- .../nodes/greater_i8_broadcast/input_1.cairo | 7 +- .../nodes/greater_i8_broadcast/output_0.cairo | 7 +- tests/nodes/greater_u32.cairo | 14 +- tests/nodes/greater_u32/input_0.cairo | 33 +- tests/nodes/greater_u32/input_1.cairo | 33 +- tests/nodes/greater_u32/output_0.cairo | 19 +- tests/nodes/greater_u32_broadcast.cairo | 14 +- 
.../nodes/greater_u32_broadcast/input_0.cairo | 9 +- .../nodes/greater_u32_broadcast/input_1.cairo | 7 +- .../greater_u32_broadcast/output_0.cairo | 9 +- ..._fp16x16_1D.cairo => is_inf_fp16x16.cairo} | 12 +- .../input_0.cairo | 11 +- tests/nodes/is_inf_fp16x16/output_0.cairo | 17 + ...is_fp8x23_1D.cairo => is_inf_fp8x23.cairo} | 14 +- .../input_0.cairo | 9 +- tests/nodes/is_inf_fp8x23/output_0.cairo | 17 + tests/nodes/is_inf_i32.cairo | 14 +- tests/nodes/is_inf_i32/input_0.cairo | 8 +- tests/nodes/is_inf_i32/output_0.cairo | 17 +- tests/nodes/is_inf_i8.cairo | 22 ++ tests/nodes/is_inf_i8/input_0.cairo | 18 ++ tests/nodes/is_inf_i8/output_0.cairo | 17 + ...axes_input_noop.cairo => is_inf_u32.cairo} | 16 +- .../input_0.cairo | 9 +- tests/nodes/is_inf_u32/output_0.cairo | 17 + tests/nodes/is_nan_fp16x16.cairo | 14 +- tests/nodes/is_nan_fp16x16/input_0.cairo | 8 +- tests/nodes/is_nan_fp16x16/output_0.cairo | 17 +- ..._2D_keepdims.cairo => is_nan_fp8x23.cairo} | 16 +- ..._axis_1.cairo => is_neg_inf_fp16x16.cairo} | 12 +- .../input_0.cairo | 9 +- tests/nodes/is_neg_inf_fp16x16/output_0.cairo | 17 + ...D_axis_1.cairo => is_neg_inf_fp8x23.cairo} | 14 +- .../input_0.cairo | 7 +- tests/nodes/is_neg_inf_fp8x23/output_0.cairo | 17 + tests/nodes/is_neg_inf_i32.cairo | 14 +- tests/nodes/is_neg_inf_i32/input_0.cairo | 8 +- tests/nodes/is_neg_inf_i32/output_0.cairo | 17 +- tests/nodes/is_neg_inf_i8.cairo | 22 ++ tests/nodes/is_neg_inf_i8/input_0.cairo | 18 ++ tests/nodes/is_neg_inf_i8/output_0.cairo | 17 + ...default.cairo => is_pos_inf_fp16x16.cairo} | 12 +- .../input_0.cairo | 11 +- tests/nodes/is_pos_inf_fp16x16/output_0.cairo | 17 + ..._default.cairo => is_pos_inf_fp8x23.cairo} | 14 +- .../input_0.cairo | 9 +- tests/nodes/is_pos_inf_fp8x23/output_0.cairo | 17 + tests/nodes/is_pos_inf_i32.cairo | 14 +- tests/nodes/is_pos_inf_i32/input_0.cairo | 8 +- tests/nodes/is_pos_inf_i32/output_0.cairo | 17 +- tests/nodes/is_pos_inf_i8.cairo | 22 ++ tests/nodes/is_pos_inf_i8/input_0.cairo 
| 18 ++ tests/nodes/is_pos_inf_i8/output_0.cairo | 17 + tests/nodes/less_equal_fp16x16.cairo | 16 +- tests/nodes/less_equal_fp16x16/input_0.cairo | 6 +- tests/nodes/less_equal_fp16x16/input_1.cairo | 8 +- tests/nodes/less_equal_fp16x16/output_0.cairo | 7 +- .../nodes/less_equal_fp16x16_broadcast.cairo | 16 +- .../input_0.cairo | 6 +- .../input_1.cairo | 6 +- .../output_0.cairo | 7 +- tests/nodes/less_equal_fp8x23.cairo | 16 +- tests/nodes/less_equal_fp8x23/input_0.cairo | 4 +- tests/nodes/less_equal_fp8x23/input_1.cairo | 8 +- tests/nodes/less_equal_fp8x23/output_0.cairo | 7 +- tests/nodes/less_equal_fp8x23_broadcast.cairo | 16 +- .../less_equal_fp8x23_broadcast/input_0.cairo | 6 +- .../less_equal_fp8x23_broadcast/input_1.cairo | 4 +- .../output_0.cairo | 7 +- tests/nodes/less_equal_i32.cairo | 12 +- tests/nodes/less_equal_i32/input_0.cairo | 7 +- tests/nodes/less_equal_i32/input_1.cairo | 7 +- tests/nodes/less_equal_i32/output_0.cairo | 7 +- tests/nodes/less_equal_i32_broadcast.cairo | 12 +- .../less_equal_i32_broadcast/input_0.cairo | 7 +- .../less_equal_i32_broadcast/input_1.cairo | 7 +- .../less_equal_i32_broadcast/output_0.cairo | 7 +- tests/nodes/less_equal_i8.cairo | 16 +- tests/nodes/less_equal_i8/input_0.cairo | 7 +- tests/nodes/less_equal_i8/input_1.cairo | 7 +- tests/nodes/less_equal_i8/output_0.cairo | 7 +- tests/nodes/less_equal_i8_broadcast.cairo | 16 +- .../less_equal_i8_broadcast/input_0.cairo | 9 +- .../less_equal_i8_broadcast/input_1.cairo | 7 +- .../less_equal_i8_broadcast/output_0.cairo | 11 +- tests/nodes/less_equal_u32.cairo | 14 +- tests/nodes/less_equal_u32/input_0.cairo | 7 +- tests/nodes/less_equal_u32/input_1.cairo | 7 +- tests/nodes/less_equal_u32/output_0.cairo | 7 +- tests/nodes/less_equal_u32_broadcast.cairo | 14 +- .../less_equal_u32_broadcast/input_0.cairo | 9 +- .../less_equal_u32_broadcast/input_1.cairo | 5 +- .../less_equal_u32_broadcast/output_0.cairo | 9 +- tests/nodes/less_fp16x16.cairo | 16 +- 
tests/nodes/less_fp16x16/input_0.cairo | 30 +- tests/nodes/less_fp16x16/input_1.cairo | 30 +- tests/nodes/less_fp16x16/output_0.cairo | 19 +- tests/nodes/less_fp16x16_broadcast.cairo | 16 +- .../less_fp16x16_broadcast/input_0.cairo | 30 +- .../less_fp16x16_broadcast/input_1.cairo | 4 +- .../less_fp16x16_broadcast/output_0.cairo | 19 +- tests/nodes/less_fp8x23.cairo | 16 +- tests/nodes/less_fp8x23/input_0.cairo | 28 +- tests/nodes/less_fp8x23/input_1.cairo | 30 +- tests/nodes/less_fp8x23/output_0.cairo | 19 +- tests/nodes/less_fp8x23_broadcast.cairo | 16 +- .../nodes/less_fp8x23_broadcast/input_0.cairo | 26 +- .../nodes/less_fp8x23_broadcast/input_1.cairo | 6 +- .../less_fp8x23_broadcast/output_0.cairo | 17 +- tests/nodes/less_i32.cairo | 14 +- tests/nodes/less_i32/input_0.cairo | 27 +- tests/nodes/less_i32/input_1.cairo | 29 +- tests/nodes/less_i32/output_0.cairo | 19 +- tests/nodes/less_i32_broadcast.cairo | 14 +- tests/nodes/less_i32_broadcast/input_0.cairo | 31 +- tests/nodes/less_i32_broadcast/input_1.cairo | 5 +- tests/nodes/less_i32_broadcast/output_0.cairo | 21 +- tests/nodes/less_i8.cairo | 18 +- tests/nodes/less_i8/input_0.cairo | 29 +- tests/nodes/less_i8/input_1.cairo | 33 +- tests/nodes/less_i8/output_0.cairo | 17 +- tests/nodes/less_i8_broadcast.cairo | 18 +- tests/nodes/less_i8_broadcast/input_0.cairo | 31 +- tests/nodes/less_i8_broadcast/input_1.cairo | 7 +- tests/nodes/less_i8_broadcast/output_0.cairo | 19 +- tests/nodes/less_u32.cairo | 14 +- tests/nodes/less_u32/input_0.cairo | 31 +- tests/nodes/less_u32/input_1.cairo | 31 +- tests/nodes/less_u32/output_0.cairo | 21 +- tests/nodes/less_u32_broadcast.cairo | 14 +- tests/nodes/less_u32_broadcast/input_0.cairo | 29 +- tests/nodes/less_u32_broadcast/input_1.cairo | 7 +- tests/nodes/less_u32_broadcast/output_0.cairo | 35 +- tests/nodes/not_bool.cairo | 10 +- tests/nodes/not_bool/input_0.cairo | 2 +- tests/nodes/not_bool/output_0.cairo | 2 +- tests/nodes/or_fp16x16.cairo | 14 +- 
tests/nodes/or_fp16x16/input_0.cairo | 26 +- tests/nodes/or_fp16x16/input_1.cairo | 32 +- tests/nodes/or_fp16x16/output_0.cairo | 7 +- tests/nodes/or_fp16x16_broadcast.cairo | 14 +- .../nodes/or_fp16x16_broadcast/input_0.cairo | 6 +- .../nodes/or_fp16x16_broadcast/input_1.cairo | 4 +- .../nodes/or_fp16x16_broadcast/output_0.cairo | 5 +- tests/nodes/or_fp8x23.cairo | 16 +- tests/nodes/or_fp8x23/input_0.cairo | 26 +- tests/nodes/or_fp8x23/input_1.cairo | 28 +- tests/nodes/or_fp8x23/output_0.cairo | 7 +- tests/nodes/or_fp8x23_broadcast.cairo | 16 +- tests/nodes/or_fp8x23_broadcast/input_0.cairo | 8 +- tests/nodes/or_fp8x23_broadcast/input_1.cairo | 6 +- .../nodes/or_fp8x23_broadcast/output_0.cairo | 5 +- tests/nodes/or_i32.cairo | 12 +- tests/nodes/or_i32/input_0.cairo | 27 +- tests/nodes/or_i32/input_1.cairo | 31 +- tests/nodes/or_i32/output_0.cairo | 7 +- tests/nodes/or_i32_broadcast.cairo | 12 +- tests/nodes/or_i32_broadcast/input_0.cairo | 7 +- tests/nodes/or_i32_broadcast/input_1.cairo | 7 +- tests/nodes/or_i32_broadcast/output_0.cairo | 5 +- tests/nodes/or_i8.cairo | 14 +- tests/nodes/or_i8/input_0.cairo | 33 +- tests/nodes/or_i8/input_1.cairo | 29 +- tests/nodes/or_i8/output_0.cairo | 7 +- tests/nodes/or_i8_broadcast.cairo | 14 +- tests/nodes/or_i8_broadcast/input_0.cairo | 9 +- tests/nodes/or_i8_broadcast/input_1.cairo | 5 +- tests/nodes/or_i8_broadcast/output_0.cairo | 5 +- tests/nodes/or_u32.cairo | 14 +- tests/nodes/or_u32/input_0.cairo | 23 +- tests/nodes/or_u32/input_1.cairo | 29 +- tests/nodes/or_u32/output_0.cairo | 9 +- tests/nodes/or_u32_broadcast.cairo | 14 +- tests/nodes/or_u32_broadcast/input_0.cairo | 9 +- tests/nodes/or_u32_broadcast/input_1.cairo | 7 +- tests/nodes/or_u32_broadcast/output_0.cairo | 5 +- .../reduce_sum_default_axes_keepdims.cairo | 20 -- .../input_0.cairo | 26 -- .../output_0.cairo | 15 - .../input_0.cairo | 26 -- .../output_0.cairo | 26 -- tests/nodes/reduce_sum_keep_dims.cairo | 20 -- 
.../nodes/reduce_sum_keep_dims/input_0.cairo | 26 -- .../nodes/reduce_sum_keep_dims/output_0.cairo | 20 -- tests/nodes/reduce_sum_no_keep_dims.cairo | 20 -- .../reduce_sum_no_keep_dims/input_0.cairo | 26 -- .../reduce_sum_no_keep_dims/output_0.cairo | 19 -- .../output_0.cairo | 13 - .../output_0.cairo | 14 - .../output_0.cairo | 14 - ..._sum_single_axis_fp16x16_2D_keepdims.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 15 - .../output_0.cairo | 13 - .../input_0.cairo | 17 - .../output_0.cairo | 14 - .../input_0.cairo | 17 - .../output_0.cairo | 14 - .../input_0.cairo | 17 - .../output_0.cairo | 15 - .../nodes/reduce_sum_single_axis_i32_1D.cairo | 20 -- .../input_0.cairo | 15 - .../output_0.cairo | 13 - ...reduce_sum_single_axis_i32_2D_axis_1.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 14 - ...educe_sum_single_axis_i32_2D_default.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 14 - ...duce_sum_single_axis_i32_2D_keepdims.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 15 - .../nodes/reduce_sum_single_axis_i8_1D.cairo | 20 -- .../output_0.cairo | 13 - .../reduce_sum_single_axis_i8_2D_axis_1.cairo | 20 -- .../output_0.cairo | 14 - ...reduce_sum_single_axis_i8_2D_default.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 14 - ...educe_sum_single_axis_i8_2D_keepdims.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 15 - .../nodes/reduce_sum_single_axis_u32_1D.cairo | 20 -- .../output_0.cairo | 13 - ...reduce_sum_single_axis_u32_2D_axis_1.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 14 - ...educe_sum_single_axis_u32_2D_default.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 14 - ...duce_sum_single_axis_u32_2D_keepdims.cairo | 20 -- .../input_0.cairo | 17 - .../output_0.cairo | 15 - tests/nodes/reshape_extended_dims.cairo | 20 -- .../nodes/reshape_extended_dims/input_0.cairo | 38 --- .../reshape_extended_dims/output_0.cairo | 39 --- tests/nodes/reshape_negative_dim.cairo | 
20 -- .../nodes/reshape_negative_dim/input_0.cairo | 38 --- .../nodes/reshape_negative_dim/output_0.cairo | 38 --- .../reshape_negative_extended_dims.cairo | 20 -- .../input_0.cairo | 38 --- .../output_0.cairo | 39 --- tests/nodes/reshape_one_dim.cairo | 20 -- tests/nodes/reshape_one_dim/input_0.cairo | 38 --- tests/nodes/reshape_one_dim/output_0.cairo | 36 --- tests/nodes/reshape_reduced_dims.cairo | 20 -- .../nodes/reshape_reduced_dims/input_0.cairo | 38 --- .../nodes/reshape_reduced_dims/output_0.cairo | 37 --- tests/nodes/reshape_reordered_all_dims.cairo | 20 -- .../reshape_reordered_all_dims/input_0.cairo | 38 --- .../reshape_reordered_all_dims/output_0.cairo | 38 --- tests/nodes/reshape_reordered_last_dims.cairo | 20 -- .../reshape_reordered_last_dims/input_0.cairo | 38 --- .../output_0.cairo | 38 --- .../nodes/reshape_zero_and_negative_dim.cairo | 20 -- .../input_0.cairo | 38 --- .../output_0.cairo | 39 --- tests/nodes/reshape_zero_dim.cairo | 20 -- tests/nodes/reshape_zero_dim/input_0.cairo | 38 --- tests/nodes/reshape_zero_dim/output_0.cairo | 39 --- tests/nodes/xor_fp16x16.cairo | 16 +- tests/nodes/xor_fp16x16/input_0.cairo | 8 +- tests/nodes/xor_fp16x16/input_1.cairo | 6 +- tests/nodes/xor_fp16x16/output_0.cairo | 9 +- tests/nodes/xor_fp16x16_broadcast.cairo | 16 +- .../nodes/xor_fp16x16_broadcast/input_0.cairo | 6 +- .../nodes/xor_fp16x16_broadcast/input_1.cairo | 4 +- .../xor_fp16x16_broadcast/output_0.cairo | 11 +- tests/nodes/xor_fp8x23.cairo | 14 +- tests/nodes/xor_fp8x23/input_0.cairo | 34 +- tests/nodes/xor_fp8x23/input_1.cairo | 26 +- tests/nodes/xor_fp8x23/output_0.cairo | 11 +- tests/nodes/xor_fp8x23_broadcast.cairo | 14 +- .../nodes/xor_fp8x23_broadcast/input_0.cairo | 6 +- .../nodes/xor_fp8x23_broadcast/input_1.cairo | 6 +- .../nodes/xor_fp8x23_broadcast/output_0.cairo | 9 +- tests/nodes/xor_i32.cairo | 14 +- tests/nodes/xor_i32/input_0.cairo | 31 +- tests/nodes/xor_i32/input_1.cairo | 31 +- tests/nodes/xor_i32/output_0.cairo | 11 +- 
tests/nodes/xor_i32_broadcast.cairo | 14 +- tests/nodes/xor_i32_broadcast/input_0.cairo | 9 +- tests/nodes/xor_i32_broadcast/input_1.cairo | 7 +- tests/nodes/xor_i32_broadcast/output_0.cairo | 9 +- tests/nodes/xor_i8.cairo | 16 +- tests/nodes/xor_i8/input_0.cairo | 27 +- tests/nodes/xor_i8/input_1.cairo | 33 +- tests/nodes/xor_i8/output_0.cairo | 15 +- tests/nodes/xor_i8_broadcast.cairo | 16 +- tests/nodes/xor_i8_broadcast/input_0.cairo | 9 +- tests/nodes/xor_i8_broadcast/input_1.cairo | 7 +- tests/nodes/xor_i8_broadcast/output_0.cairo | 9 +- tests/nodes/xor_u32.cairo | 14 +- tests/nodes/xor_u32/input_0.cairo | 33 +- tests/nodes/xor_u32/input_1.cairo | 31 +- tests/nodes/xor_u32/output_0.cairo | 13 +- tests/nodes/xor_u32_broadcast.cairo | 14 +- tests/nodes/xor_u32_broadcast/input_0.cairo | 9 +- tests/nodes/xor_u32_broadcast/input_1.cairo | 7 +- tests/nodes/xor_u32_broadcast/output_0.cairo | 7 +- 489 files changed, 3907 insertions(+), 6252 deletions(-) create mode 100644 .DS_Store delete mode 100644 docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md delete mode 100644 nodegen/node/reduce_sum_single_axis.py delete mode 100644 nodegen/node/reshape.py delete mode 100644 src/operators/tensor/math/reduce_sum_single_axis.cairo rename tests/nodes/{reduce_sum_single_axis_fp16x16_1D.cairo => is_inf_fp16x16.cairo} (60%) rename tests/nodes/{reduce_sum_single_axis_fp16x16_2D_default => is_inf_fp16x16}/input_0.cairo (63%) create mode 100644 tests/nodes/is_inf_fp16x16/output_0.cairo rename tests/nodes/{reduce_sum_single_axis_fp8x23_1D.cairo => is_inf_fp8x23.cairo} (59%) rename tests/nodes/{reduce_sum_single_axis_fp8x23_1D => is_inf_fp8x23}/input_0.cairo (62%) create mode 100644 tests/nodes/is_inf_fp8x23/output_0.cairo create mode 100644 tests/nodes/is_inf_i8.cairo create mode 100644 tests/nodes/is_inf_i8/input_0.cairo create mode 100644 tests/nodes/is_inf_i8/output_0.cairo rename tests/nodes/{reduce_sum_empty_axes_input_noop.cairo => is_inf_u32.cairo} (59%) rename 
tests/nodes/{reduce_sum_single_axis_u32_1D => is_inf_u32}/input_0.cairo (72%) create mode 100644 tests/nodes/is_inf_u32/output_0.cairo rename tests/nodes/{reduce_sum_single_axis_fp8x23_2D_keepdims.cairo => is_nan_fp8x23.cairo} (51%) rename tests/nodes/{reduce_sum_single_axis_fp16x16_2D_axis_1.cairo => is_neg_inf_fp16x16.cairo} (59%) rename tests/nodes/{reduce_sum_single_axis_fp16x16_1D => is_neg_inf_fp16x16}/input_0.cairo (63%) create mode 100644 tests/nodes/is_neg_inf_fp16x16/output_0.cairo rename tests/nodes/{reduce_sum_single_axis_fp8x23_2D_axis_1.cairo => is_neg_inf_fp8x23.cairo} (58%) rename tests/nodes/{reduce_sum_single_axis_i8_1D => is_neg_inf_fp8x23}/input_0.cairo (71%) create mode 100644 tests/nodes/is_neg_inf_fp8x23/output_0.cairo create mode 100644 tests/nodes/is_neg_inf_i8.cairo create mode 100644 tests/nodes/is_neg_inf_i8/input_0.cairo create mode 100644 tests/nodes/is_neg_inf_i8/output_0.cairo rename tests/nodes/{reduce_sum_single_axis_fp16x16_2D_default.cairo => is_pos_inf_fp16x16.cairo} (59%) rename tests/nodes/{reduce_sum_single_axis_fp16x16_2D_axis_1 => is_pos_inf_fp16x16}/input_0.cairo (58%) create mode 100644 tests/nodes/is_pos_inf_fp16x16/output_0.cairo rename tests/nodes/{reduce_sum_single_axis_fp8x23_2D_default.cairo => is_pos_inf_fp8x23.cairo} (58%) rename tests/nodes/{reduce_sum_single_axis_i8_2D_axis_1 => is_pos_inf_fp8x23}/input_0.cairo (65%) create mode 100644 tests/nodes/is_pos_inf_fp8x23/output_0.cairo create mode 100644 tests/nodes/is_pos_inf_i8.cairo create mode 100644 tests/nodes/is_pos_inf_i8/input_0.cairo create mode 100644 tests/nodes/is_pos_inf_i8/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_default_axes_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo delete mode 100644 
tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_keep_dims.cairo delete mode 100644 tests/nodes/reduce_sum_keep_dims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_keep_dims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_no_keep_dims.cairo delete mode 100644 tests/nodes/reduce_sum_no_keep_dims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_no_keep_dims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_1D.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo delete mode 100644 
tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_1D.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_1D.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo delete mode 100644 
tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo delete mode 100644 tests/nodes/reshape_extended_dims.cairo delete mode 100644 tests/nodes/reshape_extended_dims/input_0.cairo delete mode 100644 tests/nodes/reshape_extended_dims/output_0.cairo delete mode 100644 tests/nodes/reshape_negative_dim.cairo delete mode 100644 tests/nodes/reshape_negative_dim/input_0.cairo delete mode 100644 tests/nodes/reshape_negative_dim/output_0.cairo delete mode 100644 tests/nodes/reshape_negative_extended_dims.cairo delete mode 100644 tests/nodes/reshape_negative_extended_dims/input_0.cairo delete mode 100644 tests/nodes/reshape_negative_extended_dims/output_0.cairo delete mode 100644 tests/nodes/reshape_one_dim.cairo delete mode 100644 tests/nodes/reshape_one_dim/input_0.cairo delete mode 100644 tests/nodes/reshape_one_dim/output_0.cairo delete mode 100644 tests/nodes/reshape_reduced_dims.cairo delete mode 100644 tests/nodes/reshape_reduced_dims/input_0.cairo delete mode 100644 tests/nodes/reshape_reduced_dims/output_0.cairo delete mode 100644 tests/nodes/reshape_reordered_all_dims.cairo delete mode 100644 tests/nodes/reshape_reordered_all_dims/input_0.cairo delete mode 100644 tests/nodes/reshape_reordered_all_dims/output_0.cairo delete mode 100644 tests/nodes/reshape_reordered_last_dims.cairo delete mode 100644 tests/nodes/reshape_reordered_last_dims/input_0.cairo delete mode 100644 tests/nodes/reshape_reordered_last_dims/output_0.cairo delete mode 100644 tests/nodes/reshape_zero_and_negative_dim.cairo delete mode 100644 tests/nodes/reshape_zero_and_negative_dim/input_0.cairo delete mode 100644 tests/nodes/reshape_zero_and_negative_dim/output_0.cairo delete mode 100644 tests/nodes/reshape_zero_dim.cairo delete mode 100644 tests/nodes/reshape_zero_dim/input_0.cairo delete mode 100644 
tests/nodes/reshape_zero_dim/output_0.cairo diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0, other: @Tensor) -> Tensor; + fn and(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical AND of two tensors element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` with the same shape as the broadcasted inputs. +A new `Tensor` with the same shape as the broadcasted inputs. ## Examples @@ -29,7 +29,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; -fn and_example() -> Tensor { +fn and_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(), ); diff --git a/docs/framework/operators/tensor/tensor.equal.md b/docs/framework/operators/tensor/tensor.equal.md index 91599531c..6e393c989 100644 --- a/docs/framework/operators/tensor/tensor.equal.md +++ b/docs/framework/operators/tensor/tensor.equal.md @@ -1,7 +1,7 @@ #tensor.equal ```rust - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if two tensors are equal element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn eq_example() -> Tensor { +fn eq_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -43,7 +43,7 @@ fn eq_example() -> Tensor { // We can call `equal` function as follows. return tensor_1.equal(@tensor_2); } ->>> [true,true,true,true,true,false,false,false] +>>> [1,1,1,1,1,0,0,0] ``` Case 2: Compare tensors with different shapes @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn eq_example() -> Tensor { +fn eq_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -63,5 +63,5 @@ fn eq_example() -> Tensor { // We can call `equal` function as follows. return tensor_1.equal(@tensor_2); } ->>> [true,true,true,false,false,false,false,false,false] +>>> [1,1,1,0,0,0,0,0,0] ``` diff --git a/docs/framework/operators/tensor/tensor.greater.md b/docs/framework/operators/tensor/tensor.greater.md index 5704dc351..fb186b8f7 100644 --- a/docs/framework/operators/tensor/tensor.greater.md +++ b/docs/framework/operators/tensor/tensor.greater.md @@ -1,7 +1,7 @@ #tensor.greater ```rust - fn greater(self: @Tensor, other: @Tensor) -> Tensor; + fn greater(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is greater than the corresponding element of the second tensor. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_example() -> Tensor { +fn greater_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_example() -> Tensor { +fn greater_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.greater_equal.md b/docs/framework/operators/tensor/tensor.greater_equal.md index ed42a785c..1fecfbdc4 100644 --- a/docs/framework/operators/tensor/tensor.greater_equal.md +++ b/docs/framework/operators/tensor/tensor.greater_equal.md @@ -1,7 +1,7 @@ #tensor.greater_equal ```rust - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is greater than or equal to the corresponding element of the second tensor. 
@@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_equal_example() -> Tensor { +fn greater_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn greater_equal_example() -> Tensor { +fn greater_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.is_inf.md b/docs/framework/operators/tensor/tensor.is_inf.md index c0c02a548..313b4d8b8 100644 --- a/docs/framework/operators/tensor/tensor.is_inf.md +++ b/docs/framework/operators/tensor/tensor.is_inf.md @@ -1,7 +1,7 @@ ## tensor.is_inf ```rust - fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; + fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; ``` Maps infinity to true and other values to false. 
@@ -23,7 +23,7 @@ A new `Tensor` instance with entries set to true iff the input tensors cor use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, U32Tensor}; -fn is_inf_example() -> Tensor { +fn is_inf_example() -> Tensor { let tensor = TensorTrait::::new( shape: array![6].span(), data: array![1, 0, NumberTrait::INF(), 8, NumberTrait::INF(), NumberTrait::INF()].span(), ); diff --git a/docs/framework/operators/tensor/tensor.is_nan.md b/docs/framework/operators/tensor/tensor.is_nan.md index 88db61e3f..af6cfa222 100644 --- a/docs/framework/operators/tensor/tensor.is_nan.md +++ b/docs/framework/operators/tensor/tensor.is_nan.md @@ -1,7 +1,7 @@ ## tensor.is_nan ```rust - fn is_nan(self: @Tensor) -> Tensor; + fn is_nan(self: @Tensor) -> Tensor; ``` Maps NaN to true and other values to false. @@ -21,7 +21,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, FP8x23Tensor}; use orion::numbers::{FixedTrait, FP8x23}; -fn is_nan_example() -> Tensor { +fn is_nan_example() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(4); diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md index b5f56b6a4..d5d264d8a 100644 --- a/docs/framework/operators/tensor/tensor.less.md +++ b/docs/framework/operators/tensor/tensor.less.md @@ -1,7 +1,7 @@ #tensor.less ```rust - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than the corresponding element of the second tensor. 
@@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -63,5 +63,5 @@ fn less_example() -> Tensor { // We can call `less` function as follows. return tensor_1.less(@tensor_2); } ->>> [false,false,false,false,false,false,false,true,true] +>>> [0,0,0,0,0,0,0,1,1] ``` diff --git a/docs/framework/operators/tensor/tensor.less_equal.md b/docs/framework/operators/tensor/tensor.less_equal.md index 68aa87c92..c440b39c6 100644 --- a/docs/framework/operators/tensor/tensor.less_equal.md +++ b/docs/framework/operators/tensor/tensor.less_equal.md @@ -1,7 +1,7 @@ #tensor.less_equal ```rust - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor. 
@@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_equal_example() -> Tensor { +fn less_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_equal_example() -> Tensor { +fn less_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.not.md b/docs/framework/operators/tensor/tensor.not.md index e99860400..f9ee10cd9 100644 --- a/docs/framework/operators/tensor/tensor.not.md +++ b/docs/framework/operators/tensor/tensor.not.md @@ -1,7 +1,7 @@ #tensor.not ```rust - fn not(self: @Tensor) -> Tensor; + fn not(self: @Tensor) -> Tensor, other: @Tensor) -> Tensor; + fn or(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical OR of two tensors element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn or_example() -> Tensor { +fn or_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -52,7 +52,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn or_example() -> Tensor { +fn or_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/docs/framework/operators/tensor/tensor.reduce_sum.md b/docs/framework/operators/tensor/tensor.reduce_sum.md index 52b49d137..3aa77d2ce 100644 --- a/docs/framework/operators/tensor/tensor.reduce_sum.md +++ b/docs/framework/operators/tensor/tensor.reduce_sum.md @@ -4,14 +4,13 @@ fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; ``` -Computes the sum of the input tensor's elements along the provided axes +Reduces a tensor by summing its elements along a specified axis. ## Args * `self`(`@Tensor`) - The input tensor. -* `axes`(`Option>`) - Optional input list of integers, along which to reduce. -* `keepdims`(`Option`) - If true, retains reduced dimensions with length 1. -* `noop_with_empty_axes`(`Option`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. +* `axis`(`usize`) - The dimension to reduce. +* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. 
## Panics @@ -30,11 +29,11 @@ use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; fn reduce_sum_example() -> Tensor { let tensor = TensorTrait::::new( - shape: array![3, 2, 2].span(), data: array![1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12].span(), + shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), ); // We can call `reduce_sum` function as follows. - return tensor.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); + return tensor.reduce_sum(axis: 0, keepdims: false); } ->>> [[4, 6] [12, 14] [20, 22]] +>>> [[4,6],[8,10]] ``` diff --git a/docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md b/docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md deleted file mode 100644 index 3eab60dfa..000000000 --- a/docs/framework/operators/tensor/tensor.reduce_sum_single_axis.md +++ /dev/null @@ -1,39 +0,0 @@ -## tensor.reduce_sum_single_axis - -```rust - fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; -``` - -Reduces a tensor by summing its elements along a specified axis. - -## Args - -* `self`(`@Tensor`) - The input tensor. -* `axis`(`usize`) - The dimension to reduce. -* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. - -## Panics - -* Panics if axis is not in the range of the input tensor's dimensions. - -## Returns - -A new `Tensor` instance with the specified axis reduced by summing its elements. - -## Examples - -```rust -use core::array::{ArrayTrait, SpanTrait}; - -use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; - -fn reduce_sum_single_axis_example() -> Tensor { - let tensor = TensorTrait::::new( - shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), - ); - - // We can call `reduce_sum_single_axis` function as follows. 
- return tensor.reduce_sum_single_axis(axis: 0, keepdims: false); -} ->>> [[4,6],[8,10]] -``` diff --git a/docs/framework/operators/tensor/tensor.reshape.md b/docs/framework/operators/tensor/tensor.reshape.md index 399e5fc85..b2c8f84eb 100644 --- a/docs/framework/operators/tensor/tensor.reshape.md +++ b/docs/framework/operators/tensor/tensor.reshape.md @@ -1,7 +1,7 @@ # tensor.reshape ```rust - fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + fn reshape(self: @Tensor, target_shape: Span) -> Tensor; ``` Returns a new tensor with the specified target shape and the same data as the input tensor. @@ -9,7 +9,7 @@ Returns a new tensor with the specified target shape and the same data as the in ## Args * `self`(`@Tensor`) - The input tensor. -* `target_shape`(Span) - A span containing the target shape of the tensor. +* `target_shape`(Span) - A span containing the target shape of the tensor. ## Panics diff --git a/docs/framework/operators/tensor/tensor.xor.md b/docs/framework/operators/tensor/tensor.xor.md index ea4477f62..89e4c41ea 100644 --- a/docs/framework/operators/tensor/tensor.xor.md +++ b/docs/framework/operators/tensor/tensor.xor.md @@ -1,7 +1,7 @@ #tensor.xor ```rust - fn xor(self: @Tensor, other: @Tensor) -> Tensor; + fn xor(self: @Tensor, other: @Tensor) -> Tensor; ``` Computes the logical XOR of two tensors element-wise. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn xor_example() -> Tensor { +fn xor_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -52,7 +52,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn xor_example() -> Tensor { +fn xor_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/nodegen/file_manager.py b/nodegen/file_manager.py index babe26399..203b6b333 100644 --- a/nodegen/file_manager.py +++ b/nodegen/file_manager.py @@ -91,35 +91,24 @@ def base_template( This method generates a list of strings that form the template of a Cairo test function, including module imports, function definition, and assertions. """ - template = [ + return [ *[f"mod input_{i};" for i in range(arg_cnt)], *[f"mod output_{i};" for i in range(out_cnt)], - "", - "", + *[""], + *[""], *[f"use {ref};" for ref in refs], - "", - "#[test]", - "#[available_gas(2000000000)]", - f"fn test_{name}()" + " {", + *[""], + *["#[test]"], + *["#[available_gas(2000000000)]"], + *[f"fn test_{name}()" + " {"], *[f" let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)], *[f" let z_{i} = output_{i}::output_{i}();" for i in range(out_cnt)], - "" - ] - - # Handling conditional function signature based on the number of outputs - if out_cnt > 1: - template.append(f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};") - else: - template.append(f" let y_0 = {func_sig};") - - # Continue appending to the template - template.extend([ - "", + *[""], + *[f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};"], + *[""], *[f" assert_eq(y_{i}, z_{i});" for i in range(out_cnt)], - "}" - ]) - - return template + *["}"], + ] @classmethod def 
sequence_template(cls, name: str, arg_cnt: int, refs: list[str], func_sig: str) -> list[str]: diff --git a/nodegen/node/and.py b/nodegen/node/and.py index 8d344398a..975f4580f 100644 --- a/nodegen/node/and.py +++ b/nodegen/node/and.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.BOOL, x.shape, x.flatten()) y = Tensor(Dtype.BOOL, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "and_bool" make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.BOOL, x.shape, x.flatten()) y = Tensor(Dtype.BOOL, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "and_bool_broadcast" make_test([x, y], z, "BoolTensor::and(@input_0, @input_1)", name) diff --git a/nodegen/node/equal.py b/nodegen/node/equal.py index 162df0840..f995ae999 100644 --- a/nodegen/node/equal.py +++ b/nodegen/node/equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_u32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_u32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i32" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = 
Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i32_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i8" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_i8_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp8x23" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp8x23_broadcast" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp16x16" make_test([x, y], z, "input_0.equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "equal_fp16x16_broadcast" make_test([x, y], z, 
"input_0.equal(@input_1)", name) diff --git a/nodegen/node/greater.py b/nodegen/node/greater.py index dc95017b7..2fd793847 100644 --- a/nodegen/node/greater.py +++ b/nodegen/node/greater.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_u32" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_u32_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_i32" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_i32_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_i8" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_i8_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -102,7 +102,7 @@ def default(): 
x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_fp8x23" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_fp8x23_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_fp16x16" make_test([x, y], z, "input_0.greater(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_fp16x16_broadcast" make_test([x, y], z, "input_0.greater(@input_1)", name) diff --git a/nodegen/node/greater_equal.py b/nodegen/node/greater_equal.py index a4c54b672..2d43f7cc2 100644 --- a/nodegen/node/greater_equal.py +++ b/nodegen/node/greater_equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_u32" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_u32_broadcast" make_test([x, y], 
z, "input_0.greater_equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_i32" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_i32_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_i8" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_i8_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_fp8x23" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_fp8x23_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y 
= Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_fp16x16" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "greater_equal_fp16x16_broadcast" make_test([x, y], z, "input_0.greater_equal(@input_1)", name) diff --git a/nodegen/node/is_inf.py b/nodegen/node/is_inf.py index 9f72e315c..ef67a216c 100644 --- a/nodegen/node/is_inf.py +++ b/nodegen/node/is_inf.py @@ -6,38 +6,162 @@ class Is_inf(RunAll): + @staticmethod + def is_inf_u32(): + def default(): + input_0 = np.array([1, 0, INF, 8, -INF, INF], dtype=np.uint32) + output = np.array([False, False, True, False, True, True], dtype=bool) + + input_0 = Tensor(Dtype.U32, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_inf_u32" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) + + default() + @staticmethod def is_inf_i32(): def default(): input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([0, 0, 1, 0, 1, 1], dtype=np.uint32) + output = np.array([False, False, True, False, True, True], dtype=bool) input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.U32, output.shape, output.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) name = "is_inf_i32" make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) def positive(): input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([0, 0, 1, 0, 0, 1], dtype=np.uint32) + output = np.array([False, False, True, False, False, True], dtype=bool) 
input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.U32, output.shape, output.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) name = "is_pos_inf_i32" make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) def negative(): input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) - output = np.array([0, 0, 0, 0, 1, 0], dtype=np.uint32) + output = np.array([False, False, False, False, True, False], dtype=bool) input_0 = Tensor(Dtype.I32, input_0.shape, input_0.flatten()) - output = Tensor(Dtype.U32, output.shape, output.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) name = "is_neg_inf_i32" make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) default() positive() - negative() \ No newline at end of file + negative() + + @staticmethod + def is_inf_i8(): + def default(): + input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int8) + output = np.array([False, False, True, False, True, True], dtype=bool) + + input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_inf_i8" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) + + def positive(): + input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) + output = np.array([False, False, True, False, False, True], dtype=bool) + + input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_pos_inf_i8" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) + + def negative(): + input_0 = np.array([-1, 0, INF, 8, -INF, INF], dtype=np.int32) + output = np.array([False, False, False, False, True, False], dtype=bool) + + input_0 = Tensor(Dtype.I8, input_0.shape, input_0.flatten()) + output = 
Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_neg_inf_i8" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) + + default() + positive() + negative() + + @staticmethod + def is_inf_fp8x23(): + def default(): + input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) + output = np.array([False, False, True, False, True, True], dtype=bool) + + input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp( + input_0.flatten(), FixedImpl.FP8x23)) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_inf_fp8x23" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) + + def positive(): + input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) + output = np.array([False, False, True, False, False, True], dtype=bool) + + input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_pos_inf_fp8x23" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) + + def negative(): + input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) + output = np.array([False, False, False, False, True, False], dtype=bool) + + input_0 = Tensor(Dtype.FP8x23, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_neg_inf_fp8x23" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) + + default() + positive() + negative() + + @staticmethod + def is_inf_fp16x16(): + def default(): + input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) + output = np.array([False, False, True, False, True, True], dtype=bool) + + input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp( + input_0.flatten(), FixedImpl.FP16x16)) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = 
"is_inf_fp16x16" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::None, Option::None)", name) + + def positive(): + input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) + output = np.array([False, False, True, False, False, True], dtype=bool) + + input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_pos_inf_fp16x16" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1))", name) + + def negative(): + input_0 = np.array([-1.2, 0, INF, 2.8, -INF, INF], dtype=np.float64) + output = np.array([False, False, False, False, True, False], dtype=bool) + + input_0 = Tensor(Dtype.FP16x16, input_0.shape, input_0.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_neg_inf_fp16x16" + make_test([input_0], output, "TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0))", name) + + default() + positive() + negative() diff --git a/nodegen/node/is_nan.py b/nodegen/node/is_nan.py index 469e145ea..05f7ab5df 100644 --- a/nodegen/node/is_nan.py +++ b/nodegen/node/is_nan.py @@ -8,15 +8,30 @@ class Is_nan(RunAll): + @staticmethod + def is_nan_fp8x23(): + def default(): + input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64) + output = np.array([False, False, True, False, True, True], dtype=bool) + + input_0 = Tensor(Dtype.FP8x23, input_0.shape, to_fp( + input_0.flatten(), FixedImpl.FP8x23)) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) + + name = "is_nan_fp8x23" + make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name) + + default() + @staticmethod def is_nan_fp16x16(): def default(): input_0 = np.array([-1.2, 0, NaN, 2.8, NaN, NaN], dtype=np.float64) - output = np.array([0, 0, 1, 0, 1, 1]) + output = np.array([False, False, True, False, True, True], dtype=bool) input_0 = Tensor(Dtype.FP16x16, input_0.shape, to_fp( input_0.flatten(), 
FixedImpl.FP16x16)) - output = Tensor(Dtype.U32, output.shape, output.flatten()) + output = Tensor(Dtype.BOOL, output.shape, output.flatten()) name = "is_nan_fp16x16" make_test([input_0], output, "TensorTrait::is_nan(@input_0)", name) diff --git a/nodegen/node/less.py b/nodegen/node/less.py index 452ea2732..20b39263d 100644 --- a/nodegen/node/less.py +++ b/nodegen/node/less.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_u32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_u32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i8" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = 
Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_i8_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp8x23" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp8x23_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp16x16" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_fp16x16_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) diff --git a/nodegen/node/less_equal.py b/nodegen/node/less_equal.py index 2a29d0816..c54040331 100644 --- a/nodegen/node/less_equal.py +++ b/nodegen/node/less_equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_u32" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, 
z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_u32_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_i32" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_i32_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_i8" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_i8_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_fp8x23" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_fp8x23_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", 
name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_fp16x16" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "less_equal_fp16x16_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) diff --git a/nodegen/node/not.py b/nodegen/node/not.py index ef4cae4fb..6a0f44d65 100644 --- a/nodegen/node/not.py +++ b/nodegen/node/not.py @@ -1,18 +1,17 @@ import numpy as np from nodegen.node import RunAll -from ..helpers import make_test, Tensor, Dtype - +from ..helpers import make_node, make_test, Tensor, Dtype class Not(RunAll): @staticmethod def not_bool(): x = np.random.uniform(True, False, (1, 1)).astype(bool) - y = np.logical_not(x) + y = ~(x) - x = Tensor(Dtype.BOOL, x.shape, x.flatten()) - y = Tensor(Dtype.BOOL, y.shape, y.flatten()) + x = Tensor(Dtype.Bool, x.shape, x.flatten()) + y = Tensor(Dtype.Bool, y.shape, y.flatten()) - name = "not_bool" - make_test([x], y, "input_0.not()", name) - not_bool() + name = "not_bool" + make_node([x], [y], name) + make_test([x], y, "input_0", name) \ No newline at end of file diff --git a/nodegen/node/or.py b/nodegen/node/or.py index 630795182..a39d2adb3 100644 --- a/nodegen/node/or.py +++ b/nodegen/node/or.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_u32" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = 
Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_u32_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_i32" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_i32_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_i8" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_i8_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_fp8x23" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_fp8x23_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -135,7 +135,7 @@ def default(): 
x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_fp16x16" make_test([x, y], z, "input_0.or(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "or_fp16x16_broadcast" make_test([x, y], z, "input_0.or(@input_1)", name) diff --git a/nodegen/node/reduce_sum.py b/nodegen/node/reduce_sum.py index 53b2ed824..111724001 100644 --- a/nodegen/node/reduce_sum.py +++ b/nodegen/node/reduce_sum.py @@ -4,63 +4,285 @@ class Reduce_sum(RunAll): + @staticmethod + def reduce_sum_u32(): + def reduce_sum_1D(): + x = np.array([0, 1, 2,]).astype(np.uint32) + y = np.array([3]).astype(np.uint32) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_u32_1D" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def reduce_sum_2D(): + def default(): + x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) + y = np.array([2, 4]).astype(np.uint32) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_u32_2D_default" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def keepdims(): + x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) + y = np.array([2, 4]).astype(np.uint32).reshape(1, 2) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_u32_2D_keepdims" + make_test( + [x], y, "input_0.reduce_sum(0, true)", name) + + def axis_1(): + x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) + y = np.array([1, 5]).astype(np.uint32) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, 
y.shape, y.flatten()) + + name = "reduce_sum_u32_2D_axis_1" + make_test( + [x], y, "input_0.reduce_sum(1, false)", name) + + default() + keepdims() + axis_1() + reduce_sum_1D() + reduce_sum_2D() @staticmethod - def reduce_sum_no_keep_dims(): - axes = np.array([1], dtype=np.uint32) - keepdims = 0 + def reduce_sum_i32(): + def reduce_sum_1D(): + x = np.array([0, 1, 2,]).astype(np.int32) + y = np.array([3]).astype(np.int32) - x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ - [9, 10], [11, 12]]]).astype(np.uint32) - y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) + x = Tensor(Dtype.I32, x.shape, x.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + name = "reduce_sum_i32_1D" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) - name = "reduce_sum_no_keep_dims" - make_test( - [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None)", name) + def reduce_sum_2D(): + def default(): + x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) + y = np.array([2, 4]).astype(np.int32) + + x = Tensor(Dtype.I32, x.shape, x.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reduce_sum_i32_2D_default" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def keepdims(): + x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) + y = np.array([2, 4]).astype(np.int32).reshape(1, 2) + + x = Tensor(Dtype.I32, x.shape, x.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reduce_sum_i32_2D_keepdims" + make_test( + [x], y, "input_0.reduce_sum(0, true)", name) + + def axis_1(): + x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) + y = np.array([1, 5]).astype(np.int32) + + x = Tensor(Dtype.I32, x.shape, x.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reduce_sum_i32_2D_axis_1" + make_test( + [x], y, "input_0.reduce_sum(1, false)", name) + + 
default() + keepdims() + axis_1() + reduce_sum_1D() + reduce_sum_2D() @staticmethod - def reduce_sum_keep_dims(): - axes = np.array([1], dtype=np.uint32) - keepdims = 1 + def reduce_sum_i8(): + def reduce_sum_1D(): + x = np.array([0, 1, 2,]).astype(np.int8) + y = np.array([3]).astype(np.int8) + + x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) + y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) + + name = "reduce_sum_i8_1D" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def reduce_sum_2D(): + def default(): + x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) + y = np.array([2, 4]).astype(np.int8) + + x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) + y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ - [9, 10], [11, 12]]]).astype(np.uint32) - y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) + name = "reduce_sum_i8_2D_default" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + def keepdims(): + x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) + y = np.array([2, 4]).astype(np.int8).reshape(1, 2) - name = "reduce_sum_keep_dims" - make_test( - [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None)", name) + x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) + y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) + + name = "reduce_sum_i8_2D_keepdims" + make_test( + [x], y, "input_0.reduce_sum(0, true)", name) + + def axis_1(): + x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) + y = np.array([1, 5]).astype(np.int8) + + x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) + y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) + + name = "reduce_sum_i8_2D_axis_1" + make_test( + [x], y, "input_0.reduce_sum(1, false)", name) + + default() + keepdims() + axis_1() + reduce_sum_1D() + reduce_sum_2D() @staticmethod - def reduce_sum_default_axes_keepdims(): - 
keepdims = 1 + def reduce_sum_fp8x23(): + def reduce_sum_1D(): + x = np.array([0, 1, 2,]).astype(np.int64) + y = np.array([3]).astype(np.int64) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "reduce_sum_fp8x23_1D" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def reduce_sum_2D(): + def default(): + x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) + y = np.array([2, 4]).astype(np.int64) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "reduce_sum_fp8x23_2D_default" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def keepdims(): + x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) + y = np.array([2, 4]).astype(np.int64).reshape(1, 2) + + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) + + name = "reduce_sum_fp8x23_2D_keepdims" + make_test( + [x], y, "input_0.reduce_sum(0, true)", name) - x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ - [9, 10], [11, 12]]]).astype(np.uint32) - y = np.sum(x, axis=None, keepdims=keepdims == 1) + def axis_1(): + x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) + y = np.array([1, 5]).astype(np.int64) - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + x = Tensor(Dtype.FP8x23, x.shape, to_fp( + x.flatten(), FixedImpl.FP8x23)) + y = Tensor(Dtype.FP8x23, y.shape, to_fp( + y.flatten(), FixedImpl.FP8x23)) - name = "reduce_sum_default_axes_keepdims" - make_test( - [x], y, "input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None)", name) + name = "reduce_sum_fp8x23_2D_axis_1" + make_test( + [x], y, "input_0.reduce_sum(1, false)", name) + + default() + keepdims() + axis_1() + + 
reduce_sum_1D() + reduce_sum_2D() @staticmethod - def reduce_sum_empty_axes_input_noop(): - x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ - [9, 10], [11, 12]]]).astype(np.uint32) - y = np.array(x) + def reduce_sum_fp16x16(): + def reduce_sum_1D(): + x = np.array([0, 1, 2,]).astype(np.int64) + y = np.array([3]).astype(np.int64) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_sum_fp16x16_1D" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def reduce_sum_2D(): + def default(): + x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) + y = np.array([2, 4]).astype(np.int64) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_sum_fp16x16_2D_default" + make_test( + [x], y, "input_0.reduce_sum(0, false)", name) + + def keepdims(): + x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) + y = np.array([2, 4]).astype(np.int64).reshape(1, 2) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_sum_fp16x16_2D_keepdims" + make_test( + [x], y, "input_0.reduce_sum(0, true)", name) + + def axis_1(): + x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) + y = np.array([1, 5]).astype(np.int64) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "reduce_sum_fp16x16_2D_axis_1" + make_test( + [x], y, "input_0.reduce_sum(1, false)", name) - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + default() + keepdims() + axis_1() - name = "reduce_sum_empty_axes_input_noop" - make_test( - [x], y, 
"input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true))", name) + reduce_sum_1D() + reduce_sum_2D() diff --git a/nodegen/node/reduce_sum_single_axis.py b/nodegen/node/reduce_sum_single_axis.py deleted file mode 100644 index 91197c45d..000000000 --- a/nodegen/node/reduce_sum_single_axis.py +++ /dev/null @@ -1,288 +0,0 @@ -import numpy as np -from nodegen.node import RunAll -from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl - - -class Reduce_sum_single_axis(RunAll): - @staticmethod - def reduce_sum_single_axis_u32(): - def reduce_sum_single_axis_1D(): - x = np.array([0, 1, 2,]).astype(np.uint32) - y = np.array([3]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_u32_1D" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def reduce_sum_single_axis_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([2, 4]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_u32_2D_default" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([2, 4]).astype(np.uint32).reshape(1, 2) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_u32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([1, 5]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_u32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum_single_axis(1, false)", name) - - default() - keepdims() - axis_1() - 
reduce_sum_single_axis_1D() - reduce_sum_single_axis_2D() - - @staticmethod - def reduce_sum_single_axis_i32(): - def reduce_sum_single_axis_1D(): - x = np.array([0, 1, 2,]).astype(np.int32) - y = np.array([3]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i32_1D" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def reduce_sum_single_axis_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([2, 4]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i32_2D_default" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([2, 4]).astype(np.int32).reshape(1, 2) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([1, 5]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum_single_axis(1, false)", name) - - default() - keepdims() - axis_1() - reduce_sum_single_axis_1D() - reduce_sum_single_axis_2D() - - @staticmethod - def reduce_sum_single_axis_i8(): - def reduce_sum_single_axis_1D(): - x = np.array([0, 1, 2,]).astype(np.int8) - y = np.array([3]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i8_1D" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def 
reduce_sum_single_axis_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([2, 4]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i8_2D_default" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([2, 4]).astype(np.int8).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i8_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([1, 5]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_single_axis_i8_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum_single_axis(1, false)", name) - - default() - keepdims() - axis_1() - reduce_sum_single_axis_1D() - reduce_sum_single_axis_2D() - - @staticmethod - def reduce_sum_single_axis_fp8x23(): - def reduce_sum_single_axis_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([3]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_sum_single_axis_fp8x23_1D" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def reduce_sum_single_axis_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_sum_single_axis_fp8x23_2D_default" - make_test( - [x], y, 
"input_0.reduce_sum_single_axis(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_sum_single_axis_fp8x23_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([1, 5]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_sum_single_axis_fp8x23_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum_single_axis(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_sum_single_axis_1D() - reduce_sum_single_axis_2D() - - @staticmethod - def reduce_sum_single_axis_fp16x16(): - def reduce_sum_single_axis_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([3]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_single_axis_fp16x16_1D" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def reduce_sum_single_axis_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_single_axis_fp16x16_2D_default" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64).reshape(1, 2) - 
- x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_single_axis_fp16x16_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum_single_axis(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([1, 5]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_single_axis_fp16x16_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum_single_axis(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_sum_single_axis_1D() - reduce_sum_single_axis_2D() diff --git a/nodegen/node/reshape.py b/nodegen/node/reshape.py deleted file mode 100644 index e2601cf14..000000000 --- a/nodegen/node/reshape.py +++ /dev/null @@ -1,120 +0,0 @@ -import numpy as np -from nodegen.node import RunAll -from ..helpers import make_test, Tensor, Dtype - -original_shape = [2, 3, 4] -data = np.random.random_sample(original_shape).astype(np.int32) - - -def reshape_reference_implementation( - data: np.ndarray, shape: np.ndarray, allowzero: int = 0 -) -> np.ndarray: - # replace zeros with corresponding dim size - # we need to do this because np.reshape doesn't support 0 by default unless 'allowzero' is set - new_shape = np.copy(shape) - if allowzero == 0: - zeros_index = np.where(shape == 0) - new_shape[zeros_index] = np.array(data.shape)[zeros_index] - reshaped = np.reshape(data, new_shape) - return reshaped - - -class Reshape(RunAll): - @staticmethod - def reshape_reordered_all_dims(): - y = reshape_reference_implementation( - data, np.array([4, 2, 3], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_reordered_all_dims" - make_test([x], y, "input_0.reshape(array![4,2,3].span())", 
name) - - @staticmethod - def reshape_reordered_last_dims(): - y = reshape_reference_implementation( - data, np.array([2, 4, 3], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_reordered_last_dims" - make_test([x], y, "input_0.reshape(array![2,4,3].span())", name) - - @staticmethod - def reshape_reduced_dims(): - y = reshape_reference_implementation( - data, np.array([2, 12], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_reduced_dims" - make_test([x], y, "input_0.reshape(array![2,12].span())", name) - - @staticmethod - def reshape_extended_dims(): - y = reshape_reference_implementation( - data, np.array([2, 3, 2, 2], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_extended_dims" - make_test([x], y, "input_0.reshape(array![2, 3, 2, 2].span())", name) - - @staticmethod - def reshape_one_dim(): - y = reshape_reference_implementation( - data, np.array([24], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_one_dim" - make_test([x], y, "input_0.reshape(array![24].span())", name) - - @staticmethod - def reshape_negative_dim(): - y = reshape_reference_implementation( - data, np.array([2, -1, 2], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_negative_dim" - make_test([x], y, "input_0.reshape(array![2, -1, 2].span())", name) - - @staticmethod - def reshape_negative_extended_dims(): - y = reshape_reference_implementation( - data, np.array([-1, 2, 3, 4], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_negative_extended_dims" - make_test([x], y, 
"input_0.reshape(array![-1, 2, 3, 4].span())", name) - - @staticmethod - def reshape_zero_dim(): - y = reshape_reference_implementation( - data, np.array([2, 0, 4, 1], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_zero_dim" - make_test([x], y, "input_0.reshape(array![2, 0, 4, 1].span())", name) - - @staticmethod - def reshape_zero_and_negative_dim(): - y = reshape_reference_implementation( - data, np.array([2, 0, 1, -1], dtype=np.int64)) - - x = Tensor(Dtype.I32, data.shape, data.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reshape_zero_and_negative_dim" - make_test([x], y, "input_0.reshape(array![2, 0, 1, -1].span())", name) diff --git a/nodegen/node/xor.py b/nodegen/node/xor.py index e9e9f37db..bd8c025c6 100644 --- a/nodegen/node/xor.py +++ b/nodegen/node/xor.py @@ -14,7 +14,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_u32" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -26,7 +26,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_u32_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -43,7 +43,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_i32" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -55,7 +55,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_i32_broadcast" 
make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -72,7 +72,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_i8" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -84,7 +84,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_i8_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -103,7 +103,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_fp8x23" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -117,7 +117,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_fp8x23_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -136,7 +136,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_fp16x16" make_test([x, y], z, "input_0.xor(@input_1)", name) @@ -150,7 +150,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.I32, z.shape, z.flatten()) + z = Tensor(Dtype.U32, z.shape, z.flatten()) name = "xor_fp16x16_broadcast" make_test([x, y], z, "input_0.xor(@input_1)", name) diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index a3ad47a49..4f9cfc1a8 
100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -56,16 +56,12 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let bl = prod(block_shape, 0); let C = *(*data).shape.at(1) / bl; - let mut new_shape: Array = array![ - (*(*data).shape.at(0)).try_into().unwrap(), C.try_into().unwrap(), bl.try_into().unwrap() - ]; + let mut new_shape = array![*(*data).shape.at(0), C, bl]; let mut i = 2; - while i != (*data) - .shape - .len() { - new_shape.append((*(*data).shape.at(i)).try_into().unwrap()); - i += 1; - }; + while i != (*data).shape.len() { + new_shape.append(*(*data).shape.at(i)); + i += 1; + }; let data = data.reshape(new_shape.span()); @@ -73,36 +69,30 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let data_stride = stride(data.shape); let mut n = 0; - while n != *data - .shape - .at(0) { - let mut c = 0; - while c != *data - .shape - .at(1) { - let data_n_c = TensorTrait::new( - SpanTrait::slice(data.shape, 2, data.shape.len() - 2), - SpanTrait::slice( - data.data, - n * *data_stride.at(0) + c * *data_stride.at(1), - *data_stride.at(1) - ) - ); - let mut out = col2im_naive_implementation( - @data_n_c, image_shape, block_shape, dilations, pads, strides - ); - let mut i = 0; - while i != out.len() { - res.append(out.at(i)); - i += 1; - }; - - c += 1; - }; + while n != *data.shape.at(0) { + let mut c = 0; + while c != *data.shape.at(1) { + let data_n_c = TensorTrait::new( + SpanTrait::slice(data.shape, 2, data.shape.len() - 2), + SpanTrait::slice( + data.data, n * *data_stride.at(0) + c * *data_stride.at(1), *data_stride.at(1) + ) + ); + let mut out = col2im_naive_implementation( + @data_n_c, image_shape, block_shape, dilations, pads, strides + ); + let mut i = 0; + while i != out.len() { + res.append(out.at(i)); + i += 1; + }; - n += 1; + c += 1; }; + n += 1; + }; + let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; let mut i = 0; while i != image_shape.len() { diff --git 
a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index c24c2163d..f8f810558 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -61,13 +61,11 @@ fn conv_transpose< Option::None => { let mut output_padding: Array = array![]; let mut i = 2; - while i != (*X) - .shape - .len() { - output_padding.append(0); - output_padding.append(0); - i += 1; - }; + while i != (*X).shape.len() { + output_padding.append(0); + output_padding.append(0); + i += 1; + }; output_padding.span() }, @@ -153,11 +151,10 @@ fn conv_transpose< Option::None => { let mut output_shape: Array = array![]; let mut i = 0; - while i != strides - .len() { - output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); - i += 1; - }; + while i != strides.len() { + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; output_shape.span() }, @@ -165,17 +162,16 @@ fn conv_transpose< let mut total_padding: Array = array![]; let mut i = 0; - while i != output_shape - .len() { - total_padding - .append( - (*(*X).shape.at(i + 2) - 1) * *strides.at(i) - + *output_padding.at(i) - + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - - *output_shape.at(i) - ); - i += 1; - }; + while i != output_shape.len() { + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; let total_padding = total_padding.span(); @@ -188,11 +184,10 @@ fn conv_transpose< }; let mut i = 0; - while i != output_shape - .len() { - pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); - i += 1; - }; + while i != output_shape.len() { + pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); + i += 1; + }; (pads.span(), pads.len() / 2, output_shape) }, @@ -202,11 +197,10 @@ fn conv_transpose< Option::None => { let mut output_shape: Array = array![]; 
let mut i = 0; - while i != strides - .len() { - output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); - i += 1; - }; + while i != strides.len() { + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; output_shape.span() }, @@ -214,28 +208,26 @@ fn conv_transpose< let mut total_padding: Array = array![]; let mut i = 0; - while i != output_shape - .len() { - total_padding - .append( - (*(*X).shape.at(i + 2) - 1) * *strides.at(i) - + *output_padding.at(i) - + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - - *output_shape.at(i) - ); - i += 1; - }; + while i != output_shape.len() { + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; let total_padding = total_padding.span(); let mut pads: Array = array![]; let mut i = 0; - while i != output_shape - .len() { - pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); - i += 1; - }; + while i != output_shape.len() { + pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); + i += 1; + }; let mut i = 0; while i != output_shape.len() { @@ -310,62 +302,50 @@ fn conv_transpose< if group == 1 { let mut image_id = 0; - while image_id != *(*X) - .shape - .at(0) { - let w_t = TensorTrait::new(array![k, m].span(), (*W).data) - .transpose(array![1, 0].span()); - - let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); - let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); - - let gemmc = gemm - .reshape( - array![ - num_output_channels.try_into().unwrap(), - (m / num_output_channels).try_into().unwrap(), - n.try_into().unwrap() - ] - .span() - ); - let mut c = 0; - while c != num_output_channels { - let gemmc_c = TensorTrait::new( - array![m / num_output_channels, n].span(), - SpanTrait::slice( - gemmc.data, - (m / num_output_channels) * n * c, - (m / num_output_channels) * n - ) - ); - - let mut res = 
col2im_naive_implementation( - @gemmc_c, output_shape, kernel_shape, dilations, pads, strides - ); - - match B { - Option::Some(B) => { - let mut i = 0; - while i != res - .len() { - res.set(i, res.at(i) + *(*B).data.at(c)); - i += 1; - }; - }, - Option::None => {}, - } + while image_id != *(*X).shape.at(0) { + let w_t = TensorTrait::new(array![k, m].span(), (*W).data) + .transpose(array![1, 0].span()); + + let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); + let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); + + let gemmc = gemm + .reshape(array![num_output_channels, m / num_output_channels, n].span()); + let mut c = 0; + while c != num_output_channels { + let gemmc_c = TensorTrait::new( + array![m / num_output_channels, n].span(), + SpanTrait::slice( + gemmc.data, (m / num_output_channels) * n * c, (m / num_output_channels) * n + ) + ); + + let mut res = col2im_naive_implementation( + @gemmc_c, output_shape, kernel_shape, dilations, pads, strides + ); + + match B { + Option::Some(B) => { + let mut i = 0; + while i != res.len() { + res.set(i, res.at(i) + *(*B).data.at(c)); + i += 1; + }; + }, + Option::None => {}, + } - c += 1; + c += 1; - let mut i = 0; - while i != res.len() { - final.append(res.at(i)); - i += 1; - }; + let mut i = 0; + while i != res.len() { + final.append(res.at(i)); + i += 1; }; - - image_id += 1; }; + + image_id += 1; + }; } else { let mut output_array: Array> = array![]; @@ -383,21 +363,19 @@ fn conv_transpose< let mut group_W: Array = array![]; let mut image_id = 0; - while image_id != *(*X) - .shape - .at(0) { - let start = image_id * n * C + (group_id * C / group) * n; - let end = image_id * n * C + ((group_id + 1) * C / group) * n; - - let mut i = start; - while i != end { - group_X.append(*(*X).data.at(i)); - i += 1; - }; + while image_id != *(*X).shape.at(0) { + let start = image_id * n * C + (group_id * C / group) * n; + let end = image_id * n * C + ((group_id + 1) * C / group) * n; - image_id += 1; 
+ let mut i = start; + while i != end { + group_X.append(*(*X).data.at(i)); + i += 1; }; + image_id += 1; + }; + let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size; let mut i = start; @@ -455,26 +433,22 @@ fn conv_transpose< // Sorting result per item of the batch // output size : N (batch size) x num_output_channels x output_shape let mut image_id = 0; - while image_id != *(*X) - .shape - .at(0) { - let mut group_id = 0; - while group_id != group { - let group_output = *output_array.at(group_id); - let mut i = image_id * output_size * (num_output_channels / group); - - while i != (image_id + 1) - * output_size - * (num_output_channels / group) { - final.append(*group_output.at(i)); - i += 1; - }; - - group_id += 1; + while image_id != *(*X).shape.at(0) { + let mut group_id = 0; + while group_id != group { + let group_output = *output_array.at(group_id); + let mut i = image_id * output_size * (num_output_channels / group); + + while i != (image_id + 1) * output_size * (num_output_channels / group) { + final.append(*group_output.at(i)); + i += 1; }; - image_id += 1; + group_id += 1; }; + + image_id += 1; + }; } let mut shape = array![*(*X).shape.at(0), num_output_channels]; diff --git a/src/operators/nn/functional/depth_to_space.cairo b/src/operators/nn/functional/depth_to_space.cairo index 1201b1222..161ea46ad 100644 --- a/src/operators/nn/functional/depth_to_space.cairo +++ b/src/operators/nn/functional/depth_to_space.cairo @@ -20,32 +20,21 @@ fn depth_to_space< ) -> Tensor { assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); - let blocksize_i32: i32 = blocksize.try_into().unwrap(); - - let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap(); - let C: u32 = (*(tensor.shape).at(1)).try_into().unwrap(); - let H: i32 = (*(tensor.shape).at(2)).try_into().unwrap(); - let W: i32 = (*(tensor.shape).at(3)).try_into().unwrap(); - let finalshape: Array = array![ - b, - (C / 
(blocksize * blocksize)).try_into().unwrap(), - (H * blocksize_i32), - (W * blocksize_i32) - ]; + let b = (tensor.shape).at(0); + let C = (tensor.shape).at(1); + let H = (tensor.shape).at(2); + let W = (tensor.shape).at(3); + let finalshape = array![*b, *C / (blocksize * blocksize), *H * blocksize, *W * blocksize]; if mode == 'DCR' { - let tmpshape: Array = array![ - b, blocksize_i32, blocksize_i32, (C / (blocksize * blocksize)).try_into().unwrap(), H, W - ]; + let tmpshape = array![*b, blocksize, blocksize, *C / (blocksize * blocksize), *H, *W]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span()); transposed.reshape(target_shape: finalshape.span()) } else { // assert mode == "CRD" - let tmpshape: Array = array![ - b, (C / (blocksize * blocksize)).try_into().unwrap(), blocksize_i32, blocksize_i32, H, W - ]; + let tmpshape = array![*b, *C / (blocksize * blocksize), blocksize, blocksize, *H, *W]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span()); diff --git a/src/operators/nn/functional/logsoftmax.cairo b/src/operators/nn/functional/logsoftmax.cairo index 33df374f0..fdf89c43d 100644 --- a/src/operators/nn/functional/logsoftmax.cairo +++ b/src/operators/nn/functional/logsoftmax.cairo @@ -10,7 +10,7 @@ fn logsoftmax< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor = z.exp(); - let sum = exp_tensor.reduce_sum_single_axis(axis, true); + let sum = exp_tensor.reduce_sum(axis, true); let softmax = exp_tensor / sum; let logsoftmax = softmax.log(); @@ -38,7 +38,7 @@ fn logsoftmaxWide< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); - let sum = exp_tensor.reduce_sum_single_axis(axis, true); + let sum = exp_tensor.reduce_sum(axis, true); let softmax = div_downcast(@exp_tensor, @sum); softmax.log() diff --git a/src/operators/nn/functional/softmax.cairo 
b/src/operators/nn/functional/softmax.cairo index e8be7953d..10602bde7 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -13,7 +13,7 @@ fn softmax< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor = z.exp(); - let sum = exp_tensor.reduce_sum_single_axis(axis, true); + let sum = exp_tensor.reduce_sum(axis, true); exp_tensor / sum } @@ -39,7 +39,7 @@ fn softmaxWide< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); - let sum = exp_tensor.reduce_sum_single_axis(axis, true); + let sum = exp_tensor.reduce_sum(axis, true); div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/nn/functional/space_to_depth.cairo b/src/operators/nn/functional/space_to_depth.cairo index c95b500a6..d8e8089cb 100644 --- a/src/operators/nn/functional/space_to_depth.cairo +++ b/src/operators/nn/functional/space_to_depth.cairo @@ -1,4 +1,3 @@ -use core::option::OptionTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -21,28 +20,14 @@ fn space_to_depth< ) -> Tensor { assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); - let blocksize_i32: i32 = blocksize.try_into().unwrap(); - - let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap(); - let C: i32 = (*(tensor.shape).at(1)).try_into().unwrap(); - let H: u32 = (*(tensor.shape).at(2)); - let W: u32 = (*(tensor.shape).at(3)); - let tmpshape = array![ - b, - C, - (H / blocksize).try_into().unwrap(), - blocksize_i32, - (W / blocksize).try_into().unwrap(), - blocksize_i32 - ]; + let b = (tensor.shape).at(0); + let C = (tensor.shape).at(1); + let H = (tensor.shape).at(2); + let W = (tensor.shape).at(3); + let tmpshape = array![*b, *C, *H / blocksize, blocksize, *W / blocksize, blocksize]; let reshaped = (tensor).reshape(target_shape: tmpshape.span()); let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span()); - let finalshape = array![ - 
b, - C * blocksize_i32 * blocksize_i32, - (H / blocksize).try_into().unwrap(), - (W / blocksize).try_into().unwrap() - ]; + let finalshape = array![*b, *C * blocksize * blocksize, *H / blocksize, *W / blocksize]; transposed.reshape(target_shape: finalshape.span()) } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 5a673c3f9..0d21a4de3 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -53,8 +53,7 @@ impl TensorSerde, impl TDrop: Drop> of Serde { /// # tensor.reshape /// /// ```rust - /// fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + /// fn reshape(self: @Tensor, target_shape: Span) -> Tensor; /// ``` /// /// Returns a new tensor with the specified target shape and the same data as the input tensor. @@ -567,7 +566,7 @@ trait TensorTrait { /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `target_shape`(Span) - A span containing the target shape of the tensor. + /// * `target_shape`(Span) - A span containing the target shape of the tensor. /// /// ## Panics /// @@ -595,7 +594,7 @@ trait TensorTrait { /// >>> [[0,1,2,3], [4,5,6,7]] /// ``` /// - fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + fn reshape(self: @Tensor, target_shape: Span) -> Tensor; /// # tensor.transpose /// /// ```rust @@ -642,53 +641,6 @@ trait TensorTrait { /// fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// ``` /// - /// Computes the sum of the input tensor's elements along the provided axes - /// - /// ## Args - /// - /// * `self`(`@Tensor`) - The input tensor. - /// * `axes`(`Option>`) - Optional input list of integers, along which to reduce. - /// * `keepdims`(`Option`) - If true, retains reduced dimensions with length 1. - /// * `noop_with_empty_axes`(`Option`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. 
When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. - /// - /// ## Panics - /// - /// * Panics if axis is not in the range of the input tensor's dimensions. - /// - /// ## Returns - /// - /// A new `Tensor` instance with the specified axis reduced by summing its elements. - /// - /// ## Examples - /// - /// ```rust - /// use core::array::{ArrayTrait, SpanTrait}; - /// - /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; - /// - /// fn reduce_sum_example() -> Tensor { - /// let tensor = TensorTrait::::new( - /// shape: array![3, 2, 2].span(), data: array![1, 2, 3, 4, 5, 6, 7, 8 ,9, 10, 11, 12].span(), - /// ); - /// - /// // We can call `reduce_sum` function as follows. - /// return tensor.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); - /// } - /// >>> [[4, 6] [12, 14] [20, 22]] - /// ``` - /// - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor; - /// ## tensor.reduce_sum_single_axis - /// - /// ```rust - /// fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; - /// ``` - /// /// Reduces a tensor by summing its elements along a specified axis. /// /// ## Args @@ -712,18 +664,18 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn reduce_sum_single_axis_example() -> Tensor { + /// fn reduce_sum_example() -> Tensor { /// let tensor = TensorTrait::::new( /// shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(), /// ); /// - /// // We can call `reduce_sum_single_axis` function as follows. - /// return tensor.reduce_sum_single_axis(axis: 0, keepdims: false); + /// // We can call `reduce_sum` function as follows. 
+ /// return tensor.reduce_sum(axis: 0, keepdims: false); /// } /// >>> [[4,6],[8,10]] /// ``` /// - fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; /// # tensor.argmax /// /// ```rust @@ -1085,7 +1037,7 @@ trait TensorTrait { /// #tensor.equal /// /// ```rust - /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if two tensors are equal element-wise. @@ -1104,7 +1056,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (1 if equal, 0 otherwise) with the same shape as the broadcasted inputs. /// /// ## Examples /// @@ -1115,7 +1067,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn eq_example() -> Tensor { + /// fn eq_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1127,7 +1079,7 @@ trait TensorTrait { /// // We can call `equal` function as follows. /// return tensor_1.equal(@tensor_2); /// } - /// >>> [true,true,true,true,true,false,false,false] + /// >>> [1,1,1,1,1,0,0,0] /// ``` /// /// Case 2: Compare tensors with different shapes @@ -1137,7 +1089,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn eq_example() -> Tensor { + /// fn eq_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1147,14 +1099,14 @@ trait TensorTrait { /// // We can call `equal` function as follows. 
/// return tensor_1.equal(@tensor_2); /// } - /// >>> [true,true,true,false,false,false,false,false,false] + /// >>> [1,1,1,0,0,0,0,0,0] /// ``` /// - fn equal(self: @Tensor, other: @Tensor) -> Tensor; + fn equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.greater /// /// ```rust - /// fn greater(self: @Tensor, other: @Tensor) -> Tensor; + /// fn greater(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is greater than the corresponding element of the second tensor. @@ -1173,7 +1125,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. /// /// ## Examples /// @@ -1184,7 +1136,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_example() -> Tensor { + /// fn greater_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1206,7 +1158,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_example() -> Tensor { + /// fn greater_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1219,11 +1171,11 @@ trait TensorTrait { /// >>> [0,0,0,1,1,1,1,1,1] /// ``` /// - fn greater(self: @Tensor, other: @Tensor) -> Tensor; + fn greater(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.greater_equal /// /// ```rust - /// fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is greater than or equal to the corresponding element of the second tensor. 
@@ -1253,7 +1205,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_equal_example() -> Tensor { + /// fn greater_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1275,7 +1227,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn greater_equal_example() -> Tensor { + /// fn greater_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1288,11 +1240,11 @@ trait TensorTrait { /// >>> [1,1,1,1,1,1,0,0,0] /// ``` /// - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less /// /// ```rust - /// fn less(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than the corresponding element of the second tensor. @@ -1322,7 +1274,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1344,7 +1296,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1354,14 +1306,14 @@ trait TensorTrait { /// // We can call `less` function as follows. 
/// return tensor_1.less(@tensor_2); /// } - /// >>> [false,false,false,false,false,false,false,true,true] + /// >>> [0,0,0,0,0,0,0,1,1] /// ``` /// - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less_equal /// /// ```rust - /// fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor. @@ -1391,7 +1343,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_equal_example() -> Tensor { + /// fn less_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1413,7 +1365,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_equal_example() -> Tensor { + /// fn less_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1426,7 +1378,7 @@ trait TensorTrait { /// >>> [1,1,1,0,0,0,1,1,1] /// ``` /// - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.abs /// /// ```rust @@ -2158,7 +2110,7 @@ trait TensorTrait { /// #tensor.or /// /// ```rust - /// fn or(self: @Tensor, other: @Tensor) -> Tensor; + /// fn or(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical OR of two tensors element-wise. @@ -2177,7 +2129,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -2188,7 +2140,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn or_example() -> Tensor { + /// fn or_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2209,7 +2161,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn or_example() -> Tensor { + /// fn or_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2223,11 +2175,11 @@ trait TensorTrait { /// >>> [0,1,1,1,1,1,1,1,1] /// ``` /// - fn or(self: @Tensor, other: @Tensor) -> Tensor; + fn or(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.xor /// /// ```rust - /// fn xor(self: @Tensor, other: @Tensor) -> Tensor; + /// fn xor(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical XOR of two tensors element-wise. @@ -2246,7 +2198,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -2257,7 +2209,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn xor_example() -> Tensor { + /// fn xor_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2278,7 +2230,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn xor_example() -> Tensor { + /// fn xor_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -2292,7 +2244,7 @@ trait TensorTrait { /// >>> [0,0,0,1,0,0,1,0,0] /// ``` /// - fn xor(self: @Tensor, other: @Tensor) -> Tensor; + fn xor(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.acos /// /// ```rust @@ -3465,7 +3417,7 @@ trait TensorTrait { /// #tensor.and /// /// ```rust - /// fn and(self: @Tensor, other: @Tensor) -> Tensor; + /// fn and(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Computes the logical AND of two tensors element-wise. @@ -3484,7 +3436,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` with the same shape as the broadcasted inputs. + /// A new `Tensor` with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -3493,7 +3445,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, BoolTensor}; /// - /// fn and_example() -> Tensor { + /// fn and_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(), /// ); @@ -3507,7 +3459,7 @@ trait TensorTrait { /// >>> [false, false, false, false, false, true, false, false, false, false, false, true] /// ``` /// - fn and(self: @Tensor, other: @Tensor) -> Tensor; + fn and(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.where /// /// ```rust @@ -4677,7 +4629,7 @@ trait TensorTrait { /// ## tensor.is_inf /// /// ```rust - /// fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; + /// fn is_inf(self: @Tensor, detect_negative: Option, detect_positive: Option) -> Tensor; /// ``` /// /// Maps infinity to true and other values to false. @@ -4699,7 +4651,7 @@ trait TensorTrait { /// use core::array::{ArrayTrait, SpanTrait}; /// use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, U32Tensor}; /// - /// fn is_inf_example() -> Tensor { + /// fn is_inf_example() -> Tensor { /// let tensor = TensorTrait::::new( /// shape: array![6].span(), data: array![1, 0, NumberTrait::INF(), 8, NumberTrait::INF(), NumberTrait::INF()].span(), /// ); @@ -4711,11 +4663,11 @@ trait TensorTrait { /// fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor; + ) -> Tensor; /// ## tensor.is_nan /// /// ```rust - /// fn is_nan(self: @Tensor) -> Tensor; + /// fn is_nan(self: @Tensor) -> Tensor; /// ``` /// /// Maps NaN to true and other values to false. 
@@ -4735,7 +4687,7 @@ trait TensorTrait { /// use orion::operators::tensor::{BoolTensor, TensorTrait, Tensor, FP8x23Tensor}; /// use orion::numbers::{FixedTrait, FP8x23}; /// - /// fn is_nan_example() -> Tensor { + /// fn is_nan_example() -> Tensor { /// let mut shape = ArrayTrait::::new(); /// shape.append(4); /// @@ -4751,11 +4703,11 @@ trait TensorTrait { /// >>> [false, false, true, false] /// ``` /// - fn is_nan(self: @Tensor) -> Tensor; + fn is_nan(self: @Tensor) -> Tensor; /// #tensor.not /// /// ```rust - /// fn not(self: @Tensor) -> Tensor; + /// fn not(self: @Tensor) -> Tensor) -> Span { /// Cf: TensorTrait::reshape docstring -fn reshape>>(self: @Tensor, target_shape: Span) -> Tensor { - // Calculate the total number of elements in the original tensor - let mut total_elements = 1; - let mut shape = *self.shape; - loop { - match shape.pop_front() { - Option::Some(val) => total_elements *= *val, - Option::None => { break; } - }; - }; - - // Calculate 'elements_so_far' and find 'inferred_index' - let mut elements_so_far = 1; - let mut inferred_index = Option::None; - let mut target_shape_clone = target_shape.clone(); - let mut i: usize = 0; - loop { - match target_shape_clone.pop_front() { - Option::Some(dim) => { - if *dim == -1 { - if inferred_index.is_none() { - inferred_index = Option::Some(i); - } else { - panic!("Only one dimension can be inferred"); - } - } else if *dim == 0 { - if i >= (*self.shape).len() { - panic!("Dimension out of bounds for using original dimension value"); - } - elements_so_far *= *(*self).shape.at(i); - } else { - if *dim < -1 { - panic!("Invalid dimension size"); - } - elements_so_far *= (*dim).try_into().unwrap(); - }; - }, - Option::None => { break; } - }; - i+=1; - }; - - let mut target_shape_clone = target_shape.clone(); - let mut inferred_shape = ArrayTrait::::new(); - let mut i: usize = 0; - loop { - match target_shape_clone.pop_front() { - Option::Some(dim) => { - if *dim == -1 { - inferred_shape.append(total_elements / 
elements_so_far) // Inferred dimension - } else if *dim == 0 { - if i >= (*self.shape).len() { - panic!("Dimension out of bounds for using original dimension value"); - } - inferred_shape.append(*(*self).shape.at(i)) // Dimension unchanged from original - } else { - inferred_shape.append((*dim).try_into().unwrap()) - }; - }, - Option::None => { break; } - } - i+=1; - }; - - new_tensor(inferred_shape.span(), *self.data) +fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + new_tensor(target_shape, *self.data) } /// Cf: TensorTrait::at docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 8d7f6706c..612a397cc 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -60,20 +60,11 @@ impl BoolTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - panic(array!['not supported!']) - } - - fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { panic(array!['not supported!']) } @@ -109,23 +100,23 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less(self: @Tensor, other: 
@Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -187,11 +178,11 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -259,7 +250,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -419,11 +410,11 @@ impl BoolTensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { panic(array!['not supported!']) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -579,19 +570,17 @@ impl BoolTryIntobool of TryInto { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index f6f2b4f15..c9c31ae23 100644 --- 
a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -69,27 +69,14 @@ impl Complex64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } - fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -128,23 +115,23 @@ impl Complex64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -206,11 +193,11 @@ impl Complex64Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> 
Tensor { panic(array!['not supported!']) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -347,7 +334,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -456,11 +443,11 @@ impl Complex64Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { panic(array!['not supported!']) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -681,19 +668,17 @@ fn eq(lhs: @complex64, rhs: @complex64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 1671b07bd..a37ed0442 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -71,23 +71,12 @@ impl FP16x16Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - 
noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -122,23 +111,23 @@ impl FP16x16Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -200,11 +189,11 @@ impl FP16x16Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -385,7 +374,7 @@ impl FP16x16Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -487,11 +476,11 @@ impl FP16x16Tensor of TensorTrait { fn is_inf( self: @Tensor, 
detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } @@ -771,19 +760,17 @@ fn relative_eq(lhs: @FP16x16, rhs: @FP16x16) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b542b625f..2003b28ff 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -75,23 +75,12 @@ impl FP16x16WTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } fn 
reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -132,23 +121,23 @@ impl FP16x16WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -210,11 +199,11 @@ impl FP16x16WTensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -351,7 +340,7 @@ impl FP16x16WTensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -457,11 +446,11 @@ impl FP16x16WTensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } @@ -730,19 +719,17 @@ fn relative_eq(lhs: @FP16x16W, rhs: @FP16x16W) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while 
lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 1ba415db9..4870226a1 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -68,24 +68,12 @@ impl FP32x32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -120,23 +108,23 @@ impl FP32x32Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, 
other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -198,11 +186,11 @@ impl FP32x32Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -383,7 +371,7 @@ impl FP32x32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -485,11 +473,11 @@ impl FP32x32Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } @@ -778,19 +766,17 @@ fn relative_eq(lhs: @FP32x32, rhs: @FP32x32) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 && is_eq { + is_eq 
= relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 1f276a37d..3a7214d18 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -68,23 +68,12 @@ impl FP64x64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -119,23 +108,23 @@ impl FP64x64Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { 
math::less_equal::less_equal(self, other) } @@ -197,11 +186,11 @@ impl FP64x64Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -382,7 +371,7 @@ impl FP64x64Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -484,11 +473,11 @@ impl FP64x64Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } @@ -777,19 +766,17 @@ fn relative_eq(lhs: @FP64x64, rhs: @FP64x64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.shape.len() != 0 - && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index f27237169..b4a26d749 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -68,27 +68,14 @@ impl FP8x23Tensor of TensorTrait { 
unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) - } - - fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -121,23 +108,23 @@ impl FP8x23Tensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -199,11 +186,11 @@ impl FP8x23Tensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, 
other) } @@ -384,7 +371,7 @@ impl FP8x23Tensor of TensorTrait { core_ops::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -484,11 +471,11 @@ impl FP8x23Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } @@ -790,19 +777,17 @@ fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index ba6cc8ce7..06a297b69 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -71,27 +71,14 @@ impl FP8x23WTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) 
+ fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } - - fn reduce_sum_single_axis( - self: @Tensor, axis: usize, keepdims: bool - ) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) - } - - fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, keepdims) } @@ -124,23 +111,23 @@ impl FP8x23WTensor of TensorTrait { math::log::log(*self) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -202,11 +189,11 @@ impl FP8x23WTensor of TensorTrait { math::atan::atan(*self) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -338,7 +325,7 @@ impl FP8x23WTensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -441,11 +428,11 @@ impl FP8x23WTensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor 
{ + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { math::is_nan::is_nan(self) } @@ -733,19 +720,17 @@ fn relative_eq(lhs: @FP8x23W, rhs: @FP8x23W) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index eb97af01b..296876516 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -68,22 +68,14 @@ impl I32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } - fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) - } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { math::reduce_prod::reduce_prod(self, axis, 
keepdims) @@ -117,23 +109,23 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -195,11 +187,11 @@ impl I32Tensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -372,7 +364,7 @@ impl I32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -472,11 +464,11 @@ impl I32Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -719,19 +711,17 @@ impl I32TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = 
lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index f1b23b57b..42d807c68 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -66,21 +66,12 @@ impl I8Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -115,23 +106,23 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: 
@Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -193,11 +184,11 @@ impl I8Tensor of TensorTrait { panic(array!['not supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -376,7 +367,7 @@ impl I8Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -476,11 +467,11 @@ impl I8Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -711,19 +702,17 @@ impl I8TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() == 0 - && !is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() == 0 && !is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git 
a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 09900b955..efb681a86 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -65,21 +65,12 @@ impl U32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { + fn reshape(self: @Tensor, target_shape: Span) -> Tensor { reshape(self, target_shape) } - fn reduce_sum( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option - ) -> Tensor { - math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) - } - - fn reduce_sum_single_axis(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum_single_axis::reduce_sum_single_axis(self, axis, keepdims) + fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + math::reduce_sum::reduce_sum(self, axis, keepdims) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -114,23 +105,23 @@ impl U32Tensor of TensorTrait { panic(array!['not supported!']) } - fn equal(self: @Tensor, other: @Tensor) -> Tensor { + fn equal(self: @Tensor, other: @Tensor) -> Tensor { math::equal::equal(self, other) } - fn greater(self: @Tensor, other: @Tensor) -> Tensor { + fn greater(self: @Tensor, other: @Tensor) -> Tensor { math::greater::greater(self, other) } - fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn greater_equal(self: @Tensor, other: @Tensor) -> Tensor { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } @@ -192,11 +183,11 @@ impl U32Tensor of TensorTrait { panic(array!['not 
supported!']) } - fn xor(self: @Tensor, other: @Tensor) -> Tensor { + fn xor(self: @Tensor, other: @Tensor) -> Tensor { math::xor::xor(self, other) } - fn or(self: @Tensor, other: @Tensor) -> Tensor { + fn or(self: @Tensor, other: @Tensor) -> Tensor { math::or::or(self, other) } @@ -320,7 +311,7 @@ impl U32Tensor of TensorTrait { core_tensor::clip(self, min, max) } - fn and(self: @Tensor, other: @Tensor) -> Tensor { + fn and(self: @Tensor, other: @Tensor) -> Tensor { math::and::and(self, other) } @@ -420,11 +411,11 @@ impl U32Tensor of TensorTrait { fn is_inf( self: @Tensor, detect_negative: Option, detect_positive: Option - ) -> Tensor { + ) -> Tensor { math::is_inf::is_inf(self, detect_negative, detect_positive) } - fn is_nan(self: @Tensor) -> Tensor { + fn is_nan(self: @Tensor) -> Tensor { panic(array!['not supported!']) } @@ -665,19 +656,17 @@ impl U32TensorPartialOrd of PartialOrd> { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 - && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 - && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo index 90e96e542..46dbe1af7 100644 --- a/src/operators/tensor/manipulation/split_to_sequence.cairo +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -1,4 +1,3 @@ -use core::option::OptionTrait; use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; @@ -46,24 +45,22 @@ 
fn split_to_sequence, +Drop, +TensorTrait,>( if (keepdims == 0 && !has_split) { let mut splited_t_temp: Array> = array![]; let mut i = 0; - while i != splited_t - .len() { - let mut shape: Array = array![]; - let mut j = 0; - let shape_in_splited: Span = *splited_t.at(i).shape; - while j != shape_in_splited - .len() { - if (j != axis) { - shape.append((*shape_in_splited.at(j)).try_into().unwrap()) - } - - j += 1; - }; - - splited_t_temp.append(splited_t[i].reshape(shape.span())); - i += 1; + while i != splited_t.len() { + let mut shape: Array = array![]; + let mut j = 0; + let shape_in_splited: Span = *splited_t.at(i).shape; + while j != shape_in_splited.len() { + if (j != axis) { + shape.append(*shape_in_splited.at(j)) + } + + j += 1; }; + splited_t_temp.append(splited_t[i].reshape(shape.span())); + i += 1; + }; + return splited_t_temp; } splited_t @@ -108,45 +105,42 @@ fn split_num_outputs, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - while i != (*t) - .shape - .len() { - let s: usize = *(*t).shape.at(i); - sli.set(i, 0, 0); - sli.set(i, 1, s); - i += 1; - }; + while i != (*t).shape.len() { + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; let mut i: usize = 0; - while i != split - .len() { - let spl = *split.at(i); - sli.set(axis, 0, pos); - pos += spl; - sli.set(axis, 1, pos); - - let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => res, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => res, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); - let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); - let axes: Option> = Option::None(()); - let steps: Option> = Option::None(()); - let sub_t: Tensor = t.slice(starts, ends, axes, 
steps); - splited_t.append(sub_t); - i += 1; + while i != split.len() { + let spl = *split.at(i); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => res, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => res, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; + }; splited_t } @@ -160,46 +154,42 @@ fn split_has_split, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - while i != (*t) - .shape - .len() { - let s: usize = *(*t).shape.at(i); - sli.set(i, 0, 0); - sli.set(i, 1, s); - i += 1; - }; + while i != (*t).shape.len() { + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; let mut i: usize = 0; - while i != split - .data - .len() { - let spl: usize = split.at(indices: array![i].span()); - sli.set(axis, 0, pos); - pos += spl; - sli.set(axis, 1, pos); - - let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); - let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); - let axes: Option> = Option::None(()); - let steps: Option> = Option::None(()); - let sub_t: Tensor = t.slice(starts, ends, axes, steps); - 
splited_t.append(sub_t); - i += 1; + while i != split.data.len() { + let spl: usize = split.at(indices: array![i].span()); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => { res }, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => { res }, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; + }; splited_t } diff --git a/src/operators/tensor/math.cairo b/src/operators/tensor/math.cairo index fb4813ae1..b73f6d102 100644 --- a/src/operators/tensor/math.cairo +++ b/src/operators/tensor/math.cairo @@ -3,7 +3,6 @@ mod min; mod max_in_tensor; mod max; mod reduce_sum; -mod reduce_sum_single_axis; mod reduce_prod; mod argmax; mod argmin; diff --git a/src/operators/tensor/math/and.cairo b/src/operators/tensor/math/and.cairo index 95f7c9bea..0b1369f35 100644 --- a/src/operators/tensor/math/and.cairo +++ b/src/operators/tensor/math/and.cairo @@ -1,13 +1,13 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor, I32Tensor}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; /// Cf: TensorTrait::and docstring -fn and(y: @Tensor, z: @Tensor) -> Tensor { +fn and(y: @Tensor, z: @Tensor) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = 
len_from_shape(broadcasted_shape); @@ -18,13 +18,7 @@ fn and(y: @Tensor, z: @Tensor) -> Tensor { let indices_self = broadcast_index_mapping(*y.shape, indices_broadcasted); let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); - let r = if *(*y.data)[indices_self] && *(*z.data)[indices_other] { - 1 - } else { - 0 - }; - - result.append(r); + result.append(*(*y.data)[indices_self] && *(*z.data)[indices_other]); n += 1; }; diff --git a/src/operators/tensor/math/equal.cairo b/src/operators/tensor/math/equal.cairo index 96cf68329..d2693acf9 100644 --- a/src/operators/tensor/math/equal.cairo +++ b/src/operators/tensor/math/equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,14 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::equal docstring fn equal< T, + impl UsizeFTensor: TensorTrait, impl TPartialEq: PartialEq, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/flatten.cairo b/src/operators/tensor/math/flatten.cairo index 82886a3ff..a23671b77 100644 --- a/src/operators/tensor/math/flatten.cairo +++ b/src/operators/tensor/math/flatten.cairo @@ -23,11 +23,5 @@ fn flatten>(self: @Tensor, axis: usize) let new_shape_second_axis = (*self.data).len() / new_shape_first_axis; - self - .reshape( - array![ - new_shape_first_axis.try_into().unwrap(), new_shape_second_axis.try_into().unwrap() - ] - .span() - ) + self.reshape(array![new_shape_first_axis, new_shape_second_axis].span()) } diff --git 
a/src/operators/tensor/math/greater.cairo b/src/operators/tensor/math/greater.cairo index c6ff275af..f90462b22 100644 --- a/src/operators/tensor/math/greater.cairo +++ b/src/operators/tensor/math/greater.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,14 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::greater docstring fn greater< T, + impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/greater_equal.cairo b/src/operators/tensor/math/greater_equal.cairo index efd7fb8a9..bc8e1b045 100644 --- a/src/operators/tensor/math/greater_equal.cairo +++ b/src/operators/tensor/math/greater_equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,14 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::greater_equal docstring fn greater_equal< T, + impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git 
a/src/operators/tensor/math/is_inf.cairo b/src/operators/tensor/math/is_inf.cairo index 147d60870..021b10732 100644 --- a/src/operators/tensor/math/is_inf.cairo +++ b/src/operators/tensor/math/is_inf.cairo @@ -1,6 +1,6 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; -use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::implementations::tensor_bool::BoolTensor; /// Cf: TensorTrait::is_inf docstring fn is_inf< @@ -12,7 +12,7 @@ fn is_inf< impl TDrop: Drop >( x: @Tensor, detect_negative: Option, detect_positive: Option -) -> Tensor { +) -> Tensor { let neg_opt = match detect_negative { Option::Some(val) => { if val == 0 { 0 @@ -32,7 +32,7 @@ fn is_inf< }; if neg_opt == 0 && pos_opt == 0 { - return TensorTrait::new(*x.shape, ArrayTrait::::new().span()); + return TensorTrait::new(*x.shape, ArrayTrait::::new().span()); } if neg_opt == 0 && pos_opt == 1 { @@ -43,17 +43,11 @@ fn is_inf< return is_neg_inf(x); } - let mut data_result: Array = array![]; + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { - if (*item).is_inf() { - data_result.append(1); - } else { - data_result.append(0); - } - }, + Option::Some(item) => { data_result.append((*item).is_inf()); }, Option::None => { break; } }; }; @@ -71,18 +65,12 @@ fn is_pos_inf< impl TDrop: Drop >( x: @Tensor -) -> Tensor { - let mut data_result: Array = array![]; +) -> Tensor { + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { - if (*item).is_pos_inf() { - data_result.append(1); - } else { - data_result.append(0); - } - }, + Option::Some(item) => { data_result.append((*item).is_pos_inf()); }, Option::None => { break; } }; }; @@ -100,18 +88,12 @@ fn is_neg_inf< impl TDrop: Drop >( x: @Tensor -) -> Tensor { - let mut data_result: Array = array![]; +) -> Tensor { + let mut data_result: Array = array![]; let mut y: Span = 
*x.data; loop { match y.pop_front() { - Option::Some(item) => { - if (*item).is_neg_inf() { - data_result.append(1); - } else { - data_result.append(0); - } - }, + Option::Some(item) => { data_result.append((*item).is_neg_inf()); }, Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/is_nan.cairo b/src/operators/tensor/math/is_nan.cairo index 774c29d5b..2f1818a81 100644 --- a/src/operators/tensor/math/is_nan.cairo +++ b/src/operators/tensor/math/is_nan.cairo @@ -1,6 +1,6 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; -use orion::operators::tensor::U32Tensor; +use orion::operators::tensor::implementations::tensor_bool::BoolTensor; /// Cf: TensorTrait::is_nan docstring fn is_nan< @@ -12,18 +12,12 @@ fn is_nan< impl TDrop: Drop >( x: @Tensor -) -> Tensor { - let mut data_result: Array = array![]; +) -> Tensor { + let mut data_result: Array = array![]; let mut y: Span = *x.data; loop { match y.pop_front() { - Option::Some(item) => { - if (*item).is_nan() { - data_result.append(1); - } else { - data_result.append(0); - } - }, + Option::Some(item) => { data_result.append((*item).is_nan()); }, Option::None => { break; } }; }; diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 070e9da08..e61e826f5 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -1,12 +1,9 @@ -use core::option::OptionTrait; -use core::array::SpanTrait; use orion::numbers::{NumberTrait, I32IntoU32}; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ TensorTrait, Tensor, I8Tensor, I32Tensor, U32Tensor, FP16x16Tensor, BoolTensor }; use orion::operators::vec::{VecTrait, NullableVec, NullableVecImpl}; -use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; /// Cf: TensorTrait::layer_normalization docstring fn 
layer_normalization< @@ -15,7 +12,6 @@ fn layer_normalization< +TensorTrait, +NumberTrait, +PartialEq, - +AddEq, +Copy, +Drop, +Div>, @@ -76,8 +72,8 @@ fn layer_normalization< }; let mut shape_matrix = array![]; - shape_matrix.append(row_number.try_into().unwrap()); - shape_matrix.append(col_number.try_into().unwrap()); + shape_matrix.append(row_number); + shape_matrix.append(col_number); // Shape [1, 1] to mutiply one element tensors with 2D matrices let mut shape_one = array![]; @@ -94,13 +90,13 @@ fn layer_normalization< one_tensor.append(NumberTrait::one()); let x_mat = self.reshape(shape_matrix.span()); - let x_mean = reduce_sum_single_axis(@x_mat, 1, true) + let x_mean = x_mat.reduce_sum(1, true) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); let x_diff = x_mat - x_mean; let x_squared_diff = x_diff * x_diff; - let variance = reduce_sum_single_axis(@x_squared_diff, 1, true) + let variance = x_squared_diff.reduce_sum(1, true) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); let variance_eps = variance + TensorTrait::new(shape_one.span(), epsilon_tensor.span()); @@ -130,17 +126,7 @@ fn layer_normalization< *scale }; - let mut target_shape: Array = array![]; - let mut i = 0; - while i < (*self) - .shape - .len() { - target_shape.append((*(*self).shape.at(i)).try_into().unwrap()); - - i += 1; - }; - - let Y = y_mat.reshape(target_shape.span()) * scale; + let Y = y_mat.reshape((*self).shape) * scale; let Y = match B { Option::Some(B) => { diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo index e590404f6..35f9b4d73 100644 --- a/src/operators/tensor/math/less.cairo +++ b/src/operators/tensor/math/less.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, 
check_compatibility }; @@ -6,14 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less docstring fn less< T, + impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/less_equal.cairo b/src/operators/tensor/math/less_equal.cairo index dea786878..8c982a09c 100644 --- a/src/operators/tensor/math/less_equal.cairo +++ b/src/operators/tensor/math/less_equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,14 +6,15 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less_equal docstring fn less_equal< T, + impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/or.cairo b/src/operators/tensor/math/or.cairo index 0b93e0400..13b4697a3 100644 --- a/src/operators/tensor/math/or.cairo +++ b/src/operators/tensor/math/or.cairo @@ -1,5 +1,5 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility 
}; @@ -9,13 +9,14 @@ fn or< T, MAG, impl TNumber: NumberTrait, + impl UsizeFTensor: TensorTrait, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo index 8322af094..ba2be9215 100644 --- a/src/operators/tensor/math/reduce_l1.cairo +++ b/src/operators/tensor/math/reduce_l1.cairo @@ -1,7 +1,6 @@ use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; /// Cf: TensorTrait::reduce_sum docstring fn reduce_l1< @@ -17,5 +16,5 @@ fn reduce_l1< ) -> Tensor { let data_abs = self.abs(); - reduce_sum_single_axis(@data_abs, axis: axis, keepdims: keepdims) + data_abs.reduce_sum(axis: axis, keepdims: keepdims) } diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index cf5279df2..96f4b7245 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -3,7 +3,6 @@ use core::debug::PrintTrait; use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; fn square< T, @@ -41,14 +40,13 @@ fn reduce_l2< impl TTensor: TensorTrait, impl TNumber: NumberTrait, impl TMul: Mul, - impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = reduce_sum_single_axis(@tensor_square, axis: 
axis, keepdims: keepdims); + let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); tensor_square_sum.sqrt() } @@ -59,7 +57,6 @@ fn reduce_l2_complex< impl TTensor: TensorTrait, impl TNumber: NumberTrait, impl TMul: Mul, - impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop, impl TPrint: PrintTrait @@ -67,9 +64,7 @@ fn reduce_l2_complex< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let mut tensor_square = square(@self.abs()); - let mut tensor_square_sum = reduce_sum_single_axis( - @tensor_square, axis: axis, keepdims: keepdims - ); + let mut tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); tensor_square_sum.sqrt() } diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo index 8911b1e04..60a5225cb 100644 --- a/src/operators/tensor/math/reduce_log_sum.cairo +++ b/src/operators/tensor/math/reduce_log_sum.cairo @@ -1,7 +1,6 @@ use orion::numbers::NumberTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; /// Cf: TensorTrait::reduce_sum_square docstring fn reduce_log_sum< @@ -16,7 +15,7 @@ fn reduce_log_sum< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let tensor_square_sum = reduce_sum_single_axis(self, axis: axis, keepdims: keepdims); + let tensor_square_sum = self.reduce_sum(axis: axis, keepdims: keepdims); let tensor_square_sum_log = tensor_square_sum.log(); tensor_square_sum_log diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index 66d5aea5b..078345f4a 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,12 +1,6 @@ -use alexandria_sorting::bubble_sort; -use alexandria_data_structures::array_ext::{SpanTraitExt}; - -use 
orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::helpers::{ - reduce_output_shape, len_from_shape, combine_indices, get_all_axes -}; +use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; /// Cf: TensorTrait::reduce_sum docstring fn reduce_sum< @@ -14,98 +8,48 @@ fn reduce_sum< MAG, impl TTensor: TensorTrait, impl TNumber: NumberTrait, + impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop >( - self: @Tensor, - axes: Option>, - keepdims: Option, - noop_with_empty_axes: Option + self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let noop_with_empty_axes = match noop_with_empty_axes { - Option::Some(noop_with_empty_axes) => noop_with_empty_axes, - Option::None => false, - }; - let axes = match axes { - Option::Some(axes) => { - if (axes.len() == 0) { - get_all_axes(*self.shape) - } else { - assert(axes.len() == axes.unique().len(), 'duplicated axis.'); - let mut axes_arr: Array = array![]; - let mut copy_axes = axes; - loop { - match copy_axes.pop_front() { - Option::Some(axis) => { axes_arr.append(*axis); }, - Option::None => { break; } - }; - }; - let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span(); - sorted_axes - } - }, - Option::None => { - if noop_with_empty_axes { - return *self; - } - get_all_axes(*self.shape) - }, - }; - let keepdims = match keepdims { - Option::Some(keepdims) => keepdims, - Option::None => true, - }; + let mut output_data: Array = array![]; - let mut axis_c = 0; - let mut copy_axes = axes; - let mut shape = *self.shape; - let mut data = *self.data; - loop { - match copy_axes.pop_front() { - Option::Some(axis) => { - if (shape.len() == 1) { - let current_sum = accumulate_sum::(data, shape, shape, 0); - shape = array![].span(); - data = array![current_sum].span(); - break (); - } - let mut temp_data = array![]; - let mut 
temp_shape = reduce_output_shape(shape, *axis - axis_c, false); - let data_len = len_from_shape(temp_shape); - let mut index: usize = 0; - while index != data_len { - let indices = unravel_index(index, temp_shape); - let current_sum = accumulate_sum::(data, shape, indices, *axis - axis_c); + if (*self.shape).len() == 1 { + assert(axis == 0, 'axis out of dimensions'); + let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); + output_data.append(current_sum); - temp_data.append(current_sum); + let mut output_shape: Array = array![]; + output_shape.append(1); - index += 1; - }; + return TensorTrait::new(output_shape.span(), output_data.span()); + } else { + assert(axis <= (*self.shape).len(), 'axis out of dimensions'); + let output_shape = reduce_output_shape(*self.shape, axis, false); + let output_data_len = len_from_shape(output_shape); + let mut index: usize = 0; + while index != output_data_len { + let output_indices = unravel_index(index, output_shape); + let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); - shape = temp_shape; - data = temp_data.span(); - axis_c += 1; - }, - Option::None => { break; } - }; - }; + output_data.append(current_sum); - let mut axes_copy = axes; - if keepdims { - shape = *self.shape; - loop { - match axes_copy.pop_front() { - Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); }, - Option::None => { break; } - }; + index += 1; }; - TensorTrait::::new(shape, data) - } else { - TensorTrait::::new(shape, data) + if keepdims { + let output_shape = reduce_output_shape(*self.shape, axis, true); + + TensorTrait::::new(output_shape, output_data.span()) + } else { + TensorTrait::::new(output_shape, output_data.span()) + } } } + /// Helper function that accumulates the sum of elements along a specific axis. /// /// # Arguments @@ -118,35 +62,42 @@ fn reduce_sum< /// * Panics if gas limit is exceeded during execution. 
/// /// # Returns -/// * A value representing the accumulated sum along the specified axis. +/// * An i32 value representing the accumulated sum along the specified axis. fn accumulate_sum< - T, MAG, impl TNumber: NumberTrait, - impl TCopy: Copy, impl TDrop: Drop + T, + MAG, + impl TNumber: NumberTrait, + impl TAddEq: AddEq, + impl TCopy: Copy, + impl TDrop: Drop >( mut input_data: Span, input_shape: Span, output_indices: Span, axis: usize ) -> T { let axis_len = *(input_shape)[axis]; - let mut sum: T = NumberTrait::zero(); + let mut acc: T = NumberTrait::zero(); - let mut axis_index = 0; + let mut axis_index: usize = 0; if (input_shape).len() > 1 { - while axis_index != axis_len { + loop { + if axis_index == axis_len { + break (); + } + let input_indices = combine_indices(output_indices, axis_index, axis); let input_index = ravel_index(input_shape, input_indices); let ele = *(input_data)[input_index]; - sum = NumberTrait::add(sum, ele); - + acc += ele; axis_index += 1; }; } else { loop { match input_data.pop_front() { - Option::Some(item) => sum = NumberTrait::add(sum, *item), + Option::Some(item) => { acc += *item; }, Option::None => { break; } }; }; } - sum + return acc; } diff --git a/src/operators/tensor/math/reduce_sum_single_axis.cairo b/src/operators/tensor/math/reduce_sum_single_axis.cairo deleted file mode 100644 index 75ceb8bf8..000000000 --- a/src/operators/tensor/math/reduce_sum_single_axis.cairo +++ /dev/null @@ -1,106 +0,0 @@ -use core::array::SpanTrait; -use core::option::OptionTrait; -use orion::numbers::NumberTrait; -use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; - - -fn reduce_sum_single_axis< - T, - MAG, - impl TTensor: TensorTrait, - impl TNumber: NumberTrait, - impl TAddEq: AddEq, - impl TCopy: Copy, - impl TDrop: Drop ->( - self: @Tensor, axis: usize, keepdims: bool -) -> Tensor { - let mut output_data: Array 
= array![]; - - if (*self.shape).len() == 1 { - assert(axis == 0, 'axis out of dimensions'); - let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); - output_data.append(current_sum); - - let mut output_shape: Array = array![]; - output_shape.append(1); - - return TensorTrait::new(output_shape.span(), output_data.span()); - } else { - assert(axis <= (*self.shape).len(), 'axis out of dimensions'); - let output_shape = reduce_output_shape(*self.shape, axis, false); - let output_data_len = len_from_shape(output_shape); - let mut index: usize = 0; - while index != output_data_len { - let output_indices = unravel_index(index, output_shape); - let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); - - output_data.append(current_sum); - - index += 1; - }; - - if keepdims { - let output_shape = reduce_output_shape(*self.shape, axis, true); - - TensorTrait::::new(output_shape, output_data.span()) - } else { - TensorTrait::::new(output_shape, output_data.span()) - } - } -} - - -/// Helper function that accumulates the sum of elements along a specific axis. -/// -/// # Arguments -/// * `input_data` - The input's data. -/// * `input_shape` - The input's shape. -/// * `output_indices` - A span of output indices. -/// * `axis` - The axis along which to accumulate the sum. -/// -/// # Panics -/// * Panics if gas limit is exceeded during execution. -/// -/// # Returns -/// * An i32 value representing the accumulated sum along the specified axis. 
-fn accumulate_sum< - T, - MAG, - impl TNumber: NumberTrait, - impl TAddEq: AddEq, - impl TCopy: Copy, - impl TDrop: Drop ->( - mut input_data: Span, input_shape: Span, output_indices: Span, axis: usize -) -> T { - let axis_len = *(input_shape)[axis]; - let mut acc: T = NumberTrait::zero(); - - let mut axis_index: usize = 0; - - if (input_shape).len() > 1 { - loop { - if axis_index == axis_len { - break (); - } - - let input_indices = combine_indices(output_indices, axis_index, axis); - let input_index = ravel_index(input_shape, input_indices); - let ele = *(input_data)[input_index]; - acc += ele; - axis_index += 1; - }; - } else { - loop { - match input_data.pop_front() { - Option::Some(item) => { acc += *item; }, - Option::None => { break; } - }; - }; - } - - return acc; -} - diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index 3b9cd5e2b..b8ad7df99 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -1,7 +1,6 @@ use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; use orion::numbers::fixed_point::core::FixedTrait; -use orion::operators::tensor::math::reduce_sum_single_axis::reduce_sum_single_axis; fn square< T, @@ -46,7 +45,7 @@ fn reduce_sum_square< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = reduce_sum_single_axis(@tensor_square, axis: axis, keepdims: keepdims); + let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); tensor_square_sum } diff --git a/src/operators/tensor/math/xor.cairo b/src/operators/tensor/math/xor.cairo index b10291b85..7ed06eba5 100644 --- a/src/operators/tensor/math/xor.cairo +++ b/src/operators/tensor/math/xor.cairo @@ -1,5 +1,5 @@ use orion::numbers::NumberTrait; -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, 
I32Tensor}; +use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -9,13 +9,14 @@ fn xor< T, MAG, impl TNumber: NumberTrait, + impl UsizeFTensor: TensorTrait, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 836741819..29bebb762 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -278,6 +278,26 @@ mod or_i8; mod or_i8_broadcast; mod or_u32; mod or_u32_broadcast; +mod reduce_sum_fp16x16_1D; +mod reduce_sum_fp16x16_2D_default; +mod reduce_sum_fp16x16_2D_keepdims; +mod reduce_sum_fp16x16_2D_axis_1; +mod reduce_sum_fp8x23_1D; +mod reduce_sum_fp8x23_2D_default; +mod reduce_sum_fp8x23_2D_keepdims; +mod reduce_sum_fp8x23_2D_axis_1; +mod reduce_sum_i32_1D; +mod reduce_sum_i32_2D_default; +mod reduce_sum_i32_2D_keepdims; +mod reduce_sum_i32_2D_axis_1; +mod reduce_sum_i8_1D; +mod reduce_sum_i8_2D_default; +mod reduce_sum_i8_2D_keepdims; +mod reduce_sum_i8_2D_axis_1; +mod reduce_sum_u32_1D; +mod reduce_sum_u32_2D_default; +mod reduce_sum_u32_2D_keepdims; +mod reduce_sum_u32_2D_axis_1; mod relu_fp16x16; mod relu_fp8x23; mod relu_i32; @@ -784,10 +804,21 @@ mod concat_from_sequence_i8_new_axis_default; mod concat_from_sequence_u32_new_axis_zero; mod concat_from_sequence_u32_new_axis_one; mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_inf_i32; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; 
+mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; mod reduce_log_sum_fp8x23_export_do_not_keepdims; mod reduce_log_sum_fp8x23_export_keepdims; mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; @@ -1016,37 +1047,3 @@ mod label_encoder_fp8x23_default; mod label_encoder_i8_default; mod label_encoder_i32_default; mod label_encoder_u32_default; -mod reduce_sum_single_axis_fp16x16_1D; -mod reduce_sum_single_axis_fp16x16_2D_default; -mod reduce_sum_single_axis_fp16x16_2D_keepdims; -mod reduce_sum_single_axis_fp16x16_2D_axis_1; -mod reduce_sum_single_axis_fp8x23_1D; -mod reduce_sum_single_axis_fp8x23_2D_default; -mod reduce_sum_single_axis_fp8x23_2D_keepdims; -mod reduce_sum_single_axis_fp8x23_2D_axis_1; -mod reduce_sum_single_axis_i32_1D; -mod reduce_sum_single_axis_i32_2D_default; -mod reduce_sum_single_axis_i32_2D_keepdims; -mod reduce_sum_single_axis_i32_2D_axis_1; -mod reduce_sum_single_axis_i8_1D; -mod reduce_sum_single_axis_i8_2D_default; -mod reduce_sum_single_axis_i8_2D_keepdims; -mod reduce_sum_single_axis_i8_2D_axis_1; -mod reduce_sum_single_axis_u32_1D; -mod reduce_sum_single_axis_u32_2D_default; -mod reduce_sum_single_axis_u32_2D_keepdims; -mod reduce_sum_single_axis_u32_2D_axis_1; -mod reduce_sum_keep_dims; -mod reduce_sum_no_keep_dims; -mod reduce_sum_default_axes_keepdims; -mod reduce_sum_empty_axes_input_noop; -mod and_bool_broadcast; -mod reshape_extended_dims; -mod reshape_negative_dim; -mod reshape_negative_extended_dims; -mod reshape_one_dim; -mod reshape_reduced_dims; -mod reshape_reordered_all_dims; -mod reshape_reordered_last_dims; -mod reshape_zero_and_negative_dim; -mod reshape_zero_dim; diff --git a/tests/nodes/and_bool.cairo b/tests/nodes/and_bool.cairo index c9af2ad8c..223240abe 100644 --- a/tests/nodes/and_bool.cairo +++ b/tests/nodes/and_bool.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensor; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; #[test] #[available_gas(2000000000)] fn test_and_bool() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = BoolTensor::and(@input_0, @input_1); + let y = BoolTensor::and(@input_0, @input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/and_bool/input_0.cairo b/tests/nodes/and_bool/input_0.cairo index 4789edaac..881c7e8ea 100644 --- a/tests/nodes/and_bool/input_0.cairo +++ b/tests/nodes/and_bool/input_0.cairo @@ -8,17 +8,17 @@ fn input_0() -> Tensor { shape.append(4); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(false); data.append(false); data.append(true); - data.append(false); + data.append(true); + data.append(true); + data.append(true); data.append(true); data.append(true); data.append(true); data.append(false); data.append(false); - data.append(false); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool/input_1.cairo b/tests/nodes/and_bool/input_1.cairo index 9eba836e3..e26f3717a 100644 --- a/tests/nodes/and_bool/input_1.cairo +++ b/tests/nodes/and_bool/input_1.cairo @@ -8,11 +8,9 @@ fn input_1() -> Tensor { shape.append(4); let mut data = ArrayTrait::new(); - data.append(true); data.append(false); data.append(false); data.append(true); - data.append(true); data.append(false); data.append(true); data.append(false); @@ -20,5 +18,7 @@ fn input_1() -> Tensor { data.append(false); data.append(false); data.append(true); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff 
--git a/tests/nodes/and_bool/output_0.cairo b/tests/nodes/and_bool/output_0.cairo index 20c2ce71d..e961a4093 100644 --- a/tests/nodes/and_bool/output_0.cairo +++ b/tests/nodes/and_bool/output_0.cairo @@ -1,25 +1,24 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(4); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/and_bool_broadcast.cairo b/tests/nodes/and_bool_broadcast.cairo index dd58790b0..1ef34c86b 100644 --- a/tests/nodes/and_bool_broadcast.cairo +++ b/tests/nodes/and_bool_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensor; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; #[test] #[available_gas(2000000000)] fn test_and_bool_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = 
output_0::output_0(); - let y_0 = BoolTensor::and(@input_0, @input_1); + let y = BoolTensor::and(@input_0, @input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/and_bool_broadcast/input_0.cairo b/tests/nodes/and_bool_broadcast/input_0.cairo index 63959e870..56fdd1103 100644 --- a/tests/nodes/and_bool_broadcast/input_0.cairo +++ b/tests/nodes/and_bool_broadcast/input_0.cairo @@ -10,61 +10,61 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(true); - data.append(false); - data.append(true); data.append(true); data.append(false); data.append(false); data.append(false); - data.append(true); - data.append(true); data.append(false); data.append(false); data.append(true); + data.append(false); data.append(true); data.append(false); data.append(false); - data.append(true); data.append(false); - data.append(true); - data.append(true); + data.append(false); data.append(true); data.append(false); data.append(false); data.append(false); data.append(true); - data.append(true); data.append(false); data.append(false); data.append(false); data.append(true); + data.append(false); data.append(true); data.append(true); data.append(false); data.append(false); - data.append(true); + data.append(false); data.append(true); data.append(true); data.append(true); data.append(false); data.append(false); data.append(false); - data.append(true); - data.append(true); + data.append(false); + data.append(false); data.append(false); data.append(false); data.append(false); data.append(true); + data.append(false); data.append(true); data.append(false); + data.append(false); data.append(true); data.append(true); data.append(false); data.append(true); data.append(false); + data.append(false); + data.append(false); + data.append(false); data.append(true); data.append(true); - data.append(true); + data.append(false); data.append(true); data.append(true); data.append(true); diff --git a/tests/nodes/and_bool_broadcast/input_1.cairo 
b/tests/nodes/and_bool_broadcast/input_1.cairo index f855f8f1f..8da43cc44 100644 --- a/tests/nodes/and_bool_broadcast/input_1.cairo +++ b/tests/nodes/and_bool_broadcast/input_1.cairo @@ -9,11 +9,15 @@ fn input_1() -> Tensor { shape.append(5); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(true); data.append(true); data.append(false); - data.append(false); + data.append(true); + data.append(true); data.append(false); data.append(false); data.append(true); @@ -21,11 +25,17 @@ fn input_1() -> Tensor { data.append(true); data.append(false); data.append(true); + data.append(true); + data.append(false); data.append(false); data.append(true); data.append(false); + data.append(false); data.append(true); data.append(false); + data.append(false); + data.append(false); + data.append(true); data.append(true); data.append(true); data.append(true); @@ -33,41 +43,31 @@ fn input_1() -> Tensor { data.append(false); data.append(true); data.append(true); - data.append(false); data.append(true); - data.append(false); data.append(true); data.append(false); data.append(true); - data.append(true); + data.append(false); + data.append(false); data.append(false); data.append(true); data.append(true); data.append(true); data.append(true); data.append(true); + data.append(false); data.append(true); + data.append(false); data.append(true); data.append(true); - data.append(false); - data.append(false); - data.append(false); data.append(true); data.append(false); data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); data.append(true); data.append(false); data.append(false); data.append(false); data.append(true); - data.append(true); - data.append(true); - data.append(true); data.append(false); - data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/and_bool_broadcast/output_0.cairo b/tests/nodes/and_bool_broadcast/output_0.cairo index 583a3ddab..e12ed574d 100644 --- a/tests/nodes/and_bool_broadcast/output_0.cairo +++ b/tests/nodes/and_bool_broadcast/output_0.cairo @@ -1,74 +1,73 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(4); shape.append(5); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); 
+ data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16.cairo b/tests/nodes/equal_fp16x16.cairo index 0e2796da9..38c3753cd 100644 --- a/tests/nodes/equal_fp16x16.cairo +++ b/tests/nodes/equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] 
#[available_gas(2000000000)] fn test_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_fp16x16/input_0.cairo b/tests/nodes/equal_fp16x16/input_0.cairo index fa5ce11ff..1c0bdb213 100644 --- a/tests/nodes/equal_fp16x16/input_0.cairo +++ b/tests/nodes/equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - 
data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/input_1.cairo b/tests/nodes/equal_fp16x16/input_1.cairo index 13878efc5..c6e8fe0f6 100644 --- a/tests/nodes/equal_fp16x16/input_1.cairo +++ b/tests/nodes/equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -11,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 
65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16/output_0.cairo b/tests/nodes/equal_fp16x16/output_0.cairo index 102a86146..2078b6e18 100644 --- a/tests/nodes/equal_fp16x16/output_0.cairo +++ b/tests/nodes/equal_fp16x16/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,29 +12,29 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast.cairo b/tests/nodes/equal_fp16x16_broadcast.cairo index d5247e8ca..74eb5217e 100644 --- a/tests/nodes/equal_fp16x16_broadcast.cairo +++ b/tests/nodes/equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let 
input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo index 2be78eac4..a378b6d18 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo index 1d2d646c2..9a7b2b64d 100644 --- a/tests/nodes/equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,6 +10,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 
{ mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo index 6e83f693f..d2fab9fd0 100644 --- a/tests/nodes/equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp16x16_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23.cairo b/tests/nodes/equal_fp8x23.cairo index 1f5c1a9dc..112c71e8b 100644 --- a/tests/nodes/equal_fp8x23.cairo +++ b/tests/nodes/equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + 
let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_fp8x23/input_0.cairo b/tests/nodes/equal_fp8x23/input_0.cairo index 55009dd09..4f138ecb5 100644 --- a/tests/nodes/equal_fp8x23/input_0.cairo +++ b/tests/nodes/equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); 
data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/input_1.cairo b/tests/nodes/equal_fp8x23/input_1.cairo index fe7d75198..b0010c344 100644 --- a/tests/nodes/equal_fp8x23/input_1.cairo +++ b/tests/nodes/equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, 
sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23/output_0.cairo b/tests/nodes/equal_fp8x23/output_0.cairo index e3cabff16..df58147cb 100644 --- a/tests/nodes/equal_fp8x23/output_0.cairo +++ b/tests/nodes/equal_fp8x23/output_0.cairo @@ -1,36 +1,35 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/equal_fp8x23_broadcast.cairo b/tests/nodes/equal_fp8x23_broadcast.cairo index 83d41e98d..590193505 100644 --- a/tests/nodes/equal_fp8x23_broadcast.cairo +++ b/tests/nodes/equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + 
assert_eq(y, z); } diff --git a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo index d8bc8c715..31927729d 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo index d956bca2e..6ffcceb3b 100644 --- a/tests/nodes/equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,6 +10,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo 
b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo index f90460c3d..417a71252 100644 --- a/tests/nodes/equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/equal_fp8x23_broadcast/output_0.cairo @@ -1,15 +1,14 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); + data.append(0); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/equal_i32.cairo b/tests/nodes/equal_i32.cairo index 176a2566f..c2612bc6a 100644 --- a/tests/nodes/equal_i32.cairo +++ b/tests/nodes/equal_i32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_i32/input_0.cairo b/tests/nodes/equal_i32/input_0.cairo index 78e20a27c..0e2586cdb 100644 --- a/tests/nodes/equal_i32/input_0.cairo +++ b/tests/nodes/equal_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,31 +10,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); - data.append(-1); - data.append(-2); data.append(2); + data.append(2); + data.append(-3); + data.append(1); + data.append(-3); + data.append(0); data.append(-1); - data.append(-2); - data.append(-1); - data.append(-2); + data.append(-3); data.append(-3); data.append(-1); - data.append(2); + data.append(-1); data.append(0); data.append(1); + data.append(-1); data.append(0); data.append(0); data.append(-3); data.append(1); + data.append(-3); data.append(0); - data.append(2); - data.append(-2); + data.append(1); + data.append(-3); + data.append(-1); data.append(2); data.append(-2); data.append(-3); - data.append(-2); - data.append(-1); - data.append(-1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/input_1.cairo b/tests/nodes/equal_i32/input_1.cairo index 5b85fd8c3..c6b5a6fbe 100644 --- a/tests/nodes/equal_i32/input_1.cairo +++ b/tests/nodes/equal_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(-1); + data.append(-2); + data.append(-3); data.append(-3); - data.append(1); data.append(-2); - data.append(-1); - data.append(1); data.append(-3); + data.append(2); data.append(0); - data.append(-1); data.append(1); 
data.append(-1); + data.append(-3); + data.append(-1); + data.append(2); + data.append(-1); data.append(1); - data.append(0); - data.append(1); - data.append(-2); + data.append(-3); + data.append(-1); + data.append(2); + data.append(2); data.append(-2); + data.append(-1); data.append(-2); data.append(-3); - data.append(-3); - data.append(-3); - data.append(2); + data.append(-1); + data.append(-1); data.append(0); data.append(0); - data.append(-3); - data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32/output_0.cairo b/tests/nodes/equal_i32/output_0.cairo index b20beee14..0cdc2c69d 100644 --- a/tests/nodes/equal_i32/output_0.cairo +++ b/tests/nodes/equal_i32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -15,13 +14,12 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(1); @@ -34,6 +32,7 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/equal_i32_broadcast.cairo b/tests/nodes/equal_i32_broadcast.cairo index 348d93461..012a7e165 100644 --- a/tests/nodes/equal_i32_broadcast.cairo +++ b/tests/nodes/equal_i32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use 
orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_i32_broadcast/input_0.cairo b/tests/nodes/equal_i32_broadcast/input_0.cairo index 03adb3015..cefd51703 100644 --- a/tests/nodes/equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/equal_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); + data.append(-2); data.append(0); - data.append(-3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/input_1.cairo b/tests/nodes/equal_i32_broadcast/input_1.cairo index 26cafa62e..fed8199ca 100644 --- a/tests/nodes/equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/equal_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use 
orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); + data.append(2); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i32_broadcast/output_0.cairo b/tests/nodes/equal_i32_broadcast/output_0.cairo index 6e83f693f..9a2391c78 100644 --- a/tests/nodes/equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/equal_i32_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(1); data.append(0); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8.cairo b/tests/nodes/equal_i8.cairo index 1c2ac5eda..e19689a8d 100644 --- a/tests/nodes/equal_i8.cairo +++ b/tests/nodes/equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_i8() { let input_0 = input_0::input_0(); let input_1 = 
input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_i8/input_0.cairo b/tests/nodes/equal_i8/input_0.cairo index a5aecfe37..09ea9171a 100644 --- a/tests/nodes/equal_i8/input_0.cairo +++ b/tests/nodes/equal_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,31 +10,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-2); - data.append(2); - data.append(2); + data.append(0); + data.append(-3); + data.append(-3); data.append(1); - data.append(2); data.append(-3); - data.append(-2); - data.append(2); data.append(-1); - data.append(0); data.append(1); - data.append(-1); + data.append(-3); data.append(2); - data.append(-2); + data.append(0); + data.append(-1); data.append(1); data.append(-3); - data.append(-1); - data.append(0); - data.append(-2); + data.append(-3); + data.append(-3); data.append(1); data.append(2); - data.append(1); data.append(2); + data.append(-3); + data.append(0); + data.append(0); + data.append(-1); data.append(-2); - data.append(2); - data.append(1); - data.append(2); + data.append(-3); + data.append(-1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/input_1.cairo b/tests/nodes/equal_i8/input_1.cairo index 9348c6f87..b1ab51213 100644 --- a/tests/nodes/equal_i8/input_1.cairo +++ b/tests/nodes/equal_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use 
orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(-2); - data.append(-2); - data.append(-2); - data.append(0); + data.append(-1); + data.append(-1); data.append(2); data.append(-3); - data.append(-1); data.append(1); data.append(-3); + data.append(-2); data.append(1); data.append(0); - data.append(1); - data.append(-2); + data.append(-1); + data.append(-1); + data.append(-1); data.append(0); + data.append(-1); data.append(1); - data.append(0); + data.append(1); + data.append(-1); data.append(-2); + data.append(-1); + data.append(2); data.append(-3); - data.append(0); + data.append(1); data.append(-2); - data.append(-1); data.append(1); - data.append(2); - data.append(-1); data.append(-3); - data.append(-1); + data.append(1); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8/output_0.cairo b/tests/nodes/equal_i8/output_0.cairo index 5cb81ce4d..452bd9d81 100644 --- a/tests/nodes/equal_i8/output_0.cairo +++ b/tests/nodes/equal_i8/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,18 +12,17 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(0); data.append(0); 
data.append(0); data.append(0); @@ -35,6 +33,7 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/equal_i8_broadcast.cairo b/tests/nodes/equal_i8_broadcast.cairo index 36d76b4e9..cc1fd18de 100644 --- a/tests/nodes/equal_i8_broadcast.cairo +++ b/tests/nodes/equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_i8_broadcast/input_0.cairo b/tests/nodes/equal_i8_broadcast/input_0.cairo index b4d674e13..428f70adf 100644 --- a/tests/nodes/equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/equal_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = 
ArrayTrait::new(); - data.append(-1); - data.append(-3); + data.append(-2); data.append(2); + data.append(-1); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/input_1.cairo b/tests/nodes/equal_i8_broadcast/input_1.cairo index e09cd290a..51830d474 100644 --- a/tests/nodes/equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/equal_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(2); + data.append(-2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_i8_broadcast/output_0.cairo b/tests/nodes/equal_i8_broadcast/output_0.cairo index be93162e2..d2fab9fd0 100644 --- a/tests/nodes/equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/equal_i8_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32.cairo b/tests/nodes/equal_u32.cairo index b5dd76f1b..6591ecbc6 100644 --- a/tests/nodes/equal_u32.cairo +++ b/tests/nodes/equal_u32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_u32/input_0.cairo b/tests/nodes/equal_u32/input_0.cairo index 1101fa40c..9c3b88326 100644 --- a/tests/nodes/equal_u32/input_0.cairo +++ b/tests/nodes/equal_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,31 +10,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(1); + data.append(5); + data.append(0); data.append(2); - data.append(4); data.append(5); data.append(3); - data.append(1); + data.append(2); + data.append(5); + data.append(5); + data.append(0); + data.append(5); + data.append(0); data.append(0); - data.append(3); - data.append(4); - data.append(1); data.append(2); + data.append(5); data.append(4); data.append(1); - data.append(2); data.append(3); - data.append(3); - data.append(0); - data.append(0); - data.append(3); - data.append(5); data.append(1); + data.append(2); + 
data.append(0); data.append(5); + data.append(2); + data.append(4); + data.append(2); data.append(1); data.append(1); - data.append(5); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/input_1.cairo b/tests/nodes/equal_u32/input_1.cairo index b9d3a0f2a..39331ddba 100644 --- a/tests/nodes/equal_u32/input_1.cairo +++ b/tests/nodes/equal_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(4); - data.append(4); data.append(4); data.append(2); - data.append(2); - data.append(4); - data.append(4); - data.append(4); - data.append(5); data.append(3); + data.append(4); + data.append(1); data.append(2); + data.append(0); + data.append(0); data.append(4); - data.append(3); data.append(0); data.append(2); - data.append(3); data.append(0); + data.append(0); + data.append(5); + data.append(3); + data.append(5); data.append(2); - data.append(4); + data.append(0); data.append(2); + data.append(3); data.append(5); data.append(4); data.append(1); + data.append(2); data.append(5); - data.append(0); + data.append(3); + data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32/output_0.cairo b/tests/nodes/equal_u32/output_0.cairo index bf289dc4b..339b2c840 100644 --- a/tests/nodes/equal_u32/output_0.cairo +++ b/tests/nodes/equal_u32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use 
orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,9 +10,9 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); data.append(0); - data.append(1); + data.append(0); + data.append(0); data.append(0); data.append(0); data.append(0); @@ -21,6 +20,11 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(0); data.append(0); data.append(0); @@ -28,14 +32,9 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); - data.append(1); - data.append(1); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast.cairo b/tests/nodes/equal_u32_broadcast.cairo index ffe162a58..30a7868a5 100644 --- a/tests/nodes/equal_u32_broadcast.cairo +++ b/tests/nodes/equal_u32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.equal(@input_1); + let y = input_0.equal(@input_1); - 
assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/equal_u32_broadcast/input_0.cairo b/tests/nodes/equal_u32_broadcast/input_0.cairo index 2948b2d0f..d47d6b523 100644 --- a/tests/nodes/equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/equal_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(5); - data.append(1); - data.append(3); + data.append(5); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/input_1.cairo b/tests/nodes/equal_u32_broadcast/input_1.cairo index dcf2e9f1f..7c4c61dff 100644 --- a/tests/nodes/equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/equal_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); + data.append(4); data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/equal_u32_broadcast/output_0.cairo b/tests/nodes/equal_u32_broadcast/output_0.cairo index 0e89fbf7a..417a71252 100644 --- a/tests/nodes/equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/equal_u32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/greater_equal_fp16x16.cairo b/tests/nodes/greater_equal_fp16x16.cairo index c4115953d..ade17fab9 100644 --- a/tests/nodes/greater_equal_fp16x16.cairo +++ b/tests/nodes/greater_equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_fp16x16/input_0.cairo b/tests/nodes/greater_equal_fp16x16/input_0.cairo index 8483baa87..2357065aa 100644 --- a/tests/nodes/greater_equal_fp16x16/input_0.cairo +++ b/tests/nodes/greater_equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); 
data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16/input_1.cairo b/tests/nodes/greater_equal_fp16x16/input_1.cairo index 71948a671..2aca7c1e4 100644 --- a/tests/nodes/greater_equal_fp16x16/input_1.cairo +++ b/tests/nodes/greater_equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { 
mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16/output_0.cairo b/tests/nodes/greater_equal_fp16x16/output_0.cairo index 6743e67ee..3838fd015 100644 --- a/tests/nodes/greater_equal_fp16x16/output_0.cairo +++ b/tests/nodes/greater_equal_fp16x16/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,31 +10,31 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); - data.append(0); data.append(1); - data.append(0); + data.append(1); + data.append(1); data.append(1); 
data.append(1); data.append(1); data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); + data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast.cairo b/tests/nodes/greater_equal_fp16x16_broadcast.cairo index 5a121c54d..88b0d8221 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo index 061494d06..d5b16c572 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + 
data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo index 0ba9a94ab..2f4253292 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo index 5c225df37..0779d09f7 100644 --- a/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_fp16x16_broadcast/output_0.cairo @@ -1,18 +1,23 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, 
I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(0); data.append(1); + data.append(0); data.append(1); data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(1); @@ -20,22 +25,16 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); data.append(0); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); + data.append(0); + data.append(0); data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); - data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23.cairo b/tests/nodes/greater_equal_fp8x23.cairo index d2aaba160..fe69d50b0 100644 --- a/tests/nodes/greater_equal_fp8x23.cairo +++ b/tests/nodes/greater_equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = 
output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_fp8x23/input_0.cairo b/tests/nodes/greater_equal_fp8x23/input_0.cairo index e6be700e3..dc99936c5 100644 --- a/tests/nodes/greater_equal_fp8x23/input_0.cairo +++ b/tests/nodes/greater_equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 
8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23/input_1.cairo b/tests/nodes/greater_equal_fp8x23/input_1.cairo index abba030c2..181dad555 100644 --- a/tests/nodes/greater_equal_fp8x23/input_1.cairo +++ b/tests/nodes/greater_equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: 
true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23/output_0.cairo b/tests/nodes/greater_equal_fp8x23/output_0.cairo index d81121a87..0102d6e42 100644 --- a/tests/nodes/greater_equal_fp8x23/output_0.cairo +++ b/tests/nodes/greater_equal_fp8x23/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -12,19 +11,22 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(1); - data.append(1); data.append(0); + data.append(0); + data.append(1); + data.append(1); data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(1); data.append(0); - data.append(1); data.append(0); data.append(1); data.append(1); @@ -32,10 +34,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast.cairo b/tests/nodes/greater_equal_fp8x23_broadcast.cairo index 76296131d..4cb89ba2a 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::operators::tensor::FP8x23TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let 
z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo index 4d715888c..fc181dce7 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -12,30 +12,30 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 
{ mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo index a0c9b6c37..b776b414a 100644 --- a/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo index 48282ca99..d8fc4b6d7 100644 --- 
a/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_fp8x23_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,29 +10,29 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); - data.append(0); - data.append(1); data.append(1); data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/greater_equal_i32.cairo b/tests/nodes/greater_equal_i32.cairo index f3322db91..7fde407d8 100644 --- a/tests/nodes/greater_equal_i32.cairo +++ b/tests/nodes/greater_equal_i32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i32() { let input_0 = input_0::input_0(); let input_1 = 
input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_i32/input_0.cairo b/tests/nodes/greater_equal_i32/input_0.cairo index 1f121a788..462799e9e 100644 --- a/tests/nodes/greater_equal_i32/input_0.cairo +++ b/tests/nodes/greater_equal_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,31 +10,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); + data.append(-2); data.append(-1); - data.append(2); + data.append(0); + data.append(-2); + data.append(-2); data.append(-1); data.append(-1); data.append(2); - data.append(-3); - data.append(-2); - data.append(-3); - data.append(-3); data.append(-1); - data.append(1); - data.append(-3); - data.append(2); + data.append(-2); + data.append(0); data.append(2); + data.append(-3); data.append(2); - data.append(0); data.append(-2); - data.append(-2); - data.append(0); + data.append(1); data.append(0); - data.append(-2); - data.append(-2); - data.append(-1); data.append(-1); + data.append(0); + data.append(-3); + data.append(0); + data.append(-3); data.append(-2); data.append(1); + data.append(-1); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32/input_1.cairo b/tests/nodes/greater_equal_i32/input_1.cairo index 63912b68e..71c047607 100644 --- a/tests/nodes/greater_equal_i32/input_1.cairo +++ b/tests/nodes/greater_equal_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(0); - data.append(0); - data.append(-3); data.append(2); - data.append(-1); - data.append(-3); - data.append(-1); data.append(-2); + data.append(-3); data.append(1); data.append(-3); data.append(-2); - data.append(-1); - data.append(-2); + data.append(0); data.append(-1); data.append(1); - data.append(-2); data.append(-3); - data.append(1); - data.append(2); + data.append(-3); data.append(2); + data.append(1); + data.append(0); data.append(-3); - data.append(-2); data.append(-1); - data.append(1); + data.append(-2); + data.append(2); data.append(0); data.append(-2); + data.append(2); + data.append(1); + data.append(-1); + data.append(0); + data.append(0); + data.append(0); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32/output_0.cairo b/tests/nodes/greater_equal_i32/output_0.cairo index 5ba6fc6fc..f511f554d 100644 --- a/tests/nodes/greater_equal_i32/output_0.cairo +++ b/tests/nodes/greater_equal_i32/output_0.cairo @@ -1,16 +1,14 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); data.append(1); @@ -18,24 +16,25 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(1); - 
data.append(0); - data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); + data.append(0); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32_broadcast.cairo b/tests/nodes/greater_equal_i32_broadcast.cairo index 51e7dce37..3d5663ed1 100644 --- a/tests/nodes/greater_equal_i32_broadcast.cairo +++ b/tests/nodes/greater_equal_i32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_i32_broadcast/input_0.cairo b/tests/nodes/greater_equal_i32_broadcast/input_0.cairo index 0e2bb0a87..b65e8387d 100644 --- a/tests/nodes/greater_equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, 
I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(-3); - data.append(0); - data.append(2); - data.append(1); - data.append(-3); - data.append(-1); - data.append(1); data.append(0); data.append(0); - data.append(-2); data.append(-1); - data.append(1); data.append(-1); + data.append(2); + data.append(2); data.append(-1); + data.append(2); + data.append(0); data.append(-1); data.append(2); data.append(-2); data.append(1); data.append(2); data.append(1); - data.append(1); - data.append(1); data.append(-1); - data.append(2); - data.append(1); + data.append(-1); + data.append(-3); + data.append(-2); + data.append(-2); data.append(1); + data.append(0); + data.append(-1); + data.append(-2); + data.append(-3); + data.append(-2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32_broadcast/input_1.cairo b/tests/nodes/greater_equal_i32_broadcast/input_1.cairo index 60c233254..05ca97a1c 100644 --- a/tests/nodes/greater_equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(1); - data.append(-1); - data.append(-3); + data.append(0); + data.append(0); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i32_broadcast/output_0.cairo 
b/tests/nodes/greater_equal_i32_broadcast/output_0.cairo index 78caa692e..4b42da2bc 100644 --- a/tests/nodes/greater_equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_i32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,15 +10,7 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); data.append(1); - data.append(1); - data.append(0); data.append(0); data.append(0); data.append(1); @@ -27,14 +18,22 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/greater_equal_i8.cairo b/tests/nodes/greater_equal_i8.cairo index eacd7c574..7e408313f 100644 --- a/tests/nodes/greater_equal_i8.cairo +++ b/tests/nodes/greater_equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; 
+use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_i8/input_0.cairo b/tests/nodes/greater_equal_i8/input_0.cairo index 7158fd825..029abdd11 100644 --- a/tests/nodes/greater_equal_i8/input_0.cairo +++ b/tests/nodes/greater_equal_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,31 +10,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); - data.append(-1); + data.append(-2); data.append(2); data.append(1); - data.append(-3); data.append(0); - data.append(-2); - data.append(-2); - data.append(-2); - data.append(-1); data.append(-3); - data.append(1); data.append(2); - data.append(-2); - data.append(-3); data.append(-3); data.append(-2); - data.append(0); + data.append(-3); data.append(2); data.append(-1); data.append(0); + data.append(-1); + data.append(0); + data.append(1); + data.append(-2); + data.append(-2); + data.append(-1); + data.append(-2); data.append(-2); data.append(-3); + data.append(1); + data.append(1); data.append(0); - data.append(0); - data.append(-3); + data.append(1); data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8/input_1.cairo 
b/tests/nodes/greater_equal_i8/input_1.cairo index 2d4f3f080..a13f4e52b 100644 --- a/tests/nodes/greater_equal_i8/input_1.cairo +++ b/tests/nodes/greater_equal_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-1); + data.append(-2); + data.append(-3); data.append(1); + data.append(0); + data.append(-3); + data.append(0); data.append(-3); + data.append(-2); data.append(-1); - data.append(1); data.append(-1); data.append(2); - data.append(-1); data.append(-3); data.append(1); - data.append(-1); - data.append(1); - data.append(2); - data.append(0); data.append(1); data.append(1); data.append(-2); - data.append(-3); - data.append(-3); - data.append(-1); - data.append(-2); data.append(2); - data.append(-2); + data.append(2); + data.append(-1); + data.append(-1); + data.append(-3); data.append(-1); data.append(0); data.append(-1); - data.append(1); data.append(2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8/output_0.cairo b/tests/nodes/greater_equal_i8/output_0.cairo index 989556af5..6166962c9 100644 --- a/tests/nodes/greater_equal_i8/output_0.cairo +++ b/tests/nodes/greater_equal_i8/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -14,8 +13,11 @@ fn output_0() -> Tensor { 
data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(1); data.append(0); @@ -23,16 +25,13 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(0); diff --git a/tests/nodes/greater_equal_i8_broadcast.cairo b/tests/nodes/greater_equal_i8_broadcast.cairo index 287f60b66..53ffd4c26 100644 --- a/tests/nodes/greater_equal_i8_broadcast.cairo +++ b/tests/nodes/greater_equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_i8_broadcast/input_0.cairo b/tests/nodes/greater_equal_i8_broadcast/input_0.cairo index cbc4f5a1e..721b747ee 100644 --- 
a/tests/nodes/greater_equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -12,30 +11,30 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-2); data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(-1); - data.append(-1); data.append(-3); + data.append(-2); data.append(0); - data.append(1); - data.append(-3); data.append(-3); data.append(1); - data.append(2); - data.append(-2); + data.append(-1); + data.append(0); data.append(2); data.append(0); + data.append(2); + data.append(2); data.append(-3); - data.append(1); + data.append(-1); data.append(0); data.append(0); + data.append(-3); data.append(2); + data.append(-2); + data.append(-3); data.append(2); + data.append(1); data.append(-1); - data.append(-2); + data.append(-1); + data.append(1); data.append(-3); - data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8_broadcast/input_1.cairo b/tests/nodes/greater_equal_i8_broadcast/input_1.cairo index 7b6f77e67..acb5683ff 100644 --- a/tests/nodes/greater_equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-2); data.append(2); + 
data.append(2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_i8_broadcast/output_0.cairo b/tests/nodes/greater_equal_i8_broadcast/output_0.cairo index 38f6a5fdf..d1b238ebc 100644 --- a/tests/nodes/greater_equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_i8_broadcast/output_0.cairo @@ -1,21 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(0); data.append(0); data.append(0); @@ -23,19 +19,22 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); - data.append(1); - data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32.cairo b/tests/nodes/greater_equal_u32.cairo index 2cf132189..e548d6cd6 100644 --- a/tests/nodes/greater_equal_u32.cairo +++ b/tests/nodes/greater_equal_u32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_u32/input_0.cairo b/tests/nodes/greater_equal_u32/input_0.cairo index 972937864..5a6f968eb 100644 --- a/tests/nodes/greater_equal_u32/input_0.cairo +++ b/tests/nodes/greater_equal_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(5); - data.append(0); - data.append(5); - data.append(3); - data.append(3); - data.append(1); - data.append(1); data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(3); data.append(4); + data.append(5); data.append(4); + data.append(3); data.append(1); data.append(0); - data.append(2); data.append(0); - data.append(1); data.append(5); + data.append(1); data.append(2); data.append(4); data.append(3); + data.append(2); data.append(4); data.append(5); data.append(4); + data.append(0); + data.append(1); + data.append(2); + data.append(0); + data.append(5); + data.append(1); + data.append(0); data.append(5); + data.append(4); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/greater_equal_u32/input_1.cairo b/tests/nodes/greater_equal_u32/input_1.cairo index 5af7f9955..1130efb80 100644 --- a/tests/nodes/greater_equal_u32/input_1.cairo +++ b/tests/nodes/greater_equal_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(2); - data.append(3); - data.append(2); - data.append(0); - data.append(5); - data.append(5); - data.append(2); - data.append(2); - data.append(2); - data.append(1); data.append(0); - data.append(2); + data.append(4); data.append(5); data.append(5); data.append(4); - data.append(4); - data.append(5); + data.append(1); + data.append(1); data.append(5); + data.append(0); + data.append(3); + data.append(3); + data.append(1); + data.append(0); + data.append(0); data.append(5); data.append(0); + data.append(4); data.append(1); data.append(1); - data.append(3); + data.append(4); data.append(5); + data.append(1); data.append(0); + data.append(4); + data.append(4); + data.append(4); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32/output_0.cairo b/tests/nodes/greater_equal_u32/output_0.cairo index 712606141..3f8b7f407 100644 --- a/tests/nodes/greater_equal_u32/output_0.cairo +++ b/tests/nodes/greater_equal_u32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { 
let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,31 +10,31 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(0); - data.append(1); - data.append(1); data.append(1); data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(0); data.append(0); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(0); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32_broadcast.cairo b/tests/nodes/greater_equal_u32_broadcast.cairo index e7b4cd338..5d40f386b 100644 --- a/tests/nodes/greater_equal_u32_broadcast.cairo +++ b/tests/nodes/greater_equal_u32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; #[test] #[available_gas(2000000000)] fn test_greater_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater_equal(@input_1); + let y = input_0.greater_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_equal_u32_broadcast/input_0.cairo 
b/tests/nodes/greater_equal_u32_broadcast/input_0.cairo index 8ceb24b67..5bd64a3fa 100644 --- a/tests/nodes/greater_equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/greater_equal_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(3); data.append(1); - data.append(3); - data.append(3); - data.append(4); - data.append(2); data.append(2); data.append(4); - data.append(4); - data.append(3); - data.append(0); + data.append(5); data.append(1); data.append(5); - data.append(0); - data.append(0); - data.append(0); data.append(4); - data.append(3); data.append(1); data.append(3); + data.append(1); data.append(3); + data.append(0); + data.append(5); + data.append(5); + data.append(4); + data.append(0); data.append(5); data.append(0); + data.append(5); + data.append(2); data.append(4); data.append(4); data.append(2); data.append(1); + data.append(3); + data.append(1); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32_broadcast/input_1.cairo b/tests/nodes/greater_equal_u32_broadcast/input_1.cairo index d30f37f4d..365ec35ab 100644 --- a/tests/nodes/greater_equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/greater_equal_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { 
shape.append(1); let mut data = ArrayTrait::new(); - data.append(3); + data.append(1); + data.append(2); data.append(2); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_equal_u32_broadcast/output_0.cairo b/tests/nodes/greater_equal_u32_broadcast/output_0.cairo index cdc03dbeb..d97dd0a71 100644 --- a/tests/nodes/greater_equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/greater_equal_u32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,31 +10,31 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); - data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); data.append(0); data.append(1); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); + data.append(0); data.append(1); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16.cairo b/tests/nodes/greater_fp16x16.cairo index 9e4fcd4c2..2a7a03143 100644 --- a/tests/nodes/greater_fp16x16.cairo +++ b/tests/nodes/greater_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_fp16x16/input_0.cairo b/tests/nodes/greater_fp16x16/input_0.cairo index 3945db72b..62da96aaf 100644 --- a/tests/nodes/greater_fp16x16/input_0.cairo +++ b/tests/nodes/greater_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { 
mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16/input_1.cairo b/tests/nodes/greater_fp16x16/input_1.cairo index e68118f8b..ab8cd104f 100644 --- a/tests/nodes/greater_fp16x16/input_1.cairo +++ b/tests/nodes/greater_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = 
ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); 
data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16/output_0.cairo b/tests/nodes/greater_fp16x16/output_0.cairo index e23a4e669..48e9c4f38 100644 --- a/tests/nodes/greater_fp16x16/output_0.cairo +++ b/tests/nodes/greater_fp16x16/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -12,28 +11,28 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); - data.append(1); data.append(1); data.append(0); data.append(0); data.append(1); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/greater_fp16x16_broadcast.cairo b/tests/nodes/greater_fp16x16_broadcast.cairo index 1ae074724..26a9933e4 100644 --- a/tests/nodes/greater_fp16x16_broadcast.cairo +++ b/tests/nodes/greater_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_fp16x16_broadcast/input_0.cairo b/tests/nodes/greater_fp16x16_broadcast/input_0.cairo index f62741a74..db67a5335 100644 --- a/tests/nodes/greater_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/greater_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16_broadcast/input_1.cairo b/tests/nodes/greater_fp16x16_broadcast/input_1.cairo index 138779aa4..1a7845d54 100644 --- a/tests/nodes/greater_fp16x16_broadcast/input_1.cairo +++ 
b/tests/nodes/greater_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp16x16_broadcast/output_0.cairo b/tests/nodes/greater_fp16x16_broadcast/output_0.cairo index 0e89fbf7a..983d99cbd 100644 --- a/tests/nodes/greater_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/greater_fp16x16_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23.cairo b/tests/nodes/greater_fp8x23.cairo index 811be552b..235e35bb9 100644 --- a/tests/nodes/greater_fp8x23.cairo +++ b/tests/nodes/greater_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{U32Tensor, 
U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_fp8x23/input_0.cairo b/tests/nodes/greater_fp8x23/input_0.cairo index 103fae1dd..ab05d7c9d 100644 --- a/tests/nodes/greater_fp8x23/input_0.cairo +++ b/tests/nodes/greater_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, 
sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23/input_1.cairo b/tests/nodes/greater_fp8x23/input_1.cairo index 52bf5b265..583b859dd 100644 --- a/tests/nodes/greater_fp8x23/input_1.cairo +++ b/tests/nodes/greater_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, 
FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false 
}); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23/output_0.cairo b/tests/nodes/greater_fp8x23/output_0.cairo index 7c5b41ca7..2c4aa7864 100644 --- a/tests/nodes/greater_fp8x23/output_0.cairo +++ b/tests/nodes/greater_fp8x23/output_0.cairo @@ -1,37 +1,36 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); - data.append(0); data.append(1); data.append(0); data.append(1); - data.append(0); + data.append(1); + data.append(1); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); diff --git a/tests/nodes/greater_fp8x23_broadcast.cairo b/tests/nodes/greater_fp8x23_broadcast.cairo index 7ace215fb..d7fa1d5a8 100644 --- a/tests/nodes/greater_fp8x23_broadcast.cairo +++ b/tests/nodes/greater_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_fp8x23_broadcast/input_0.cairo b/tests/nodes/greater_fp8x23_broadcast/input_0.cairo index ce997ece1..a18362086 100644 --- a/tests/nodes/greater_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/greater_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -11,7 +11,7 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23_broadcast/input_1.cairo b/tests/nodes/greater_fp8x23_broadcast/input_1.cairo 
index 4f8364ca5..2bb7abce1 100644 --- a/tests/nodes/greater_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/greater_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_fp8x23_broadcast/output_0.cairo b/tests/nodes/greater_fp8x23_broadcast/output_0.cairo index f90460c3d..983d99cbd 100644 --- a/tests/nodes/greater_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/greater_fp8x23_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); @@ -11,7 +10,7 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(0); - data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32.cairo b/tests/nodes/greater_i32.cairo index 9d999181b..4d1ce8975 100644 --- a/tests/nodes/greater_i32.cairo +++ b/tests/nodes/greater_i32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; 
+use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_i32/input_0.cairo b/tests/nodes/greater_i32/input_0.cairo index e3f61960e..c5a8c900a 100644 --- a/tests/nodes/greater_i32/input_0.cairo +++ b/tests/nodes/greater_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-3); data.append(0); - data.append(1); - data.append(0); + data.append(-2); + data.append(-3); + data.append(-3); data.append(2); + data.append(-1); + data.append(-3); + data.append(1); + data.append(-3); data.append(2); data.append(0); data.append(-1); - data.append(0); - data.append(2); - data.append(-2); + data.append(1); data.append(-1); - data.append(0); + data.append(2); data.append(1); data.append(1); - data.append(-3); - data.append(0); data.append(-2); + data.append(2); + data.append(-1); + data.append(-2); + data.append(0); data.append(-3); - data.append(1); data.append(-3); - data.append(0); - 
data.append(1); - data.append(-1); data.append(2); data.append(0); - data.append(-2); - data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32/input_1.cairo b/tests/nodes/greater_i32/input_1.cairo index d2de3b6cd..7f2ac7cf6 100644 --- a/tests/nodes/greater_i32/input_1.cairo +++ b/tests/nodes/greater_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-1); + data.append(0); data.append(-3); data.append(1); data.append(0); data.append(0); + data.append(-1); + data.append(-1); data.append(-2); - data.append(-3); - data.append(-2); - data.append(-2); - data.append(-2); - data.append(-2); - data.append(0); - data.append(-2); - data.append(-2); - data.append(1); + data.append(-1); + data.append(2); data.append(1); - data.append(-2); - data.append(-2); - data.append(0); + data.append(2); data.append(-3); - data.append(-2); - data.append(0); + data.append(2); + data.append(2); data.append(0); data.append(2); + data.append(2); data.append(0); - data.append(-2); + data.append(-3); data.append(-1); - data.append(2); + data.append(-3); + data.append(0); + data.append(-2); + data.append(1); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32/output_0.cairo b/tests/nodes/greater_i32/output_0.cairo index dc7cfb3af..5375527f6 100644 --- a/tests/nodes/greater_i32/output_0.cairo +++ b/tests/nodes/greater_i32/output_0.cairo @@ -1,41 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use 
orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(1); - data.append(1); data.append(0); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); + data.append(1); data.append(0); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32_broadcast.cairo b/tests/nodes/greater_i32_broadcast.cairo index 6642b10af..7ebe716df 100644 --- a/tests/nodes/greater_i32_broadcast.cairo +++ b/tests/nodes/greater_i32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff 
--git a/tests/nodes/greater_i32_broadcast/input_0.cairo b/tests/nodes/greater_i32_broadcast/input_0.cairo index b1c4959f8..ee130c085 100644 --- a/tests/nodes/greater_i32_broadcast/input_0.cairo +++ b/tests/nodes/greater_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); + data.append(0); data.append(1); - data.append(2); - data.append(-3); + data.append(0); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32_broadcast/input_1.cairo b/tests/nodes/greater_i32_broadcast/input_1.cairo index 801e78f8e..790fd4ff9 100644 --- a/tests/nodes/greater_i32_broadcast/input_1.cairo +++ b/tests/nodes/greater_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(-2); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i32_broadcast/output_0.cairo b/tests/nodes/greater_i32_broadcast/output_0.cairo index 6e83f693f..417a71252 100644 --- a/tests/nodes/greater_i32_broadcast/output_0.cairo +++ b/tests/nodes/greater_i32_broadcast/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(1); + data.append(0); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/greater_i8.cairo b/tests/nodes/greater_i8.cairo index b73f17d8f..1eec0cc13 100644 --- a/tests/nodes/greater_i8.cairo +++ b/tests/nodes/greater_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I8TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_i8/input_0.cairo b/tests/nodes/greater_i8/input_0.cairo index 00a84a925..0d74b0aee 100644 --- a/tests/nodes/greater_i8/input_0.cairo +++ b/tests/nodes/greater_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use 
orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(-3); + data.append(-2); data.append(1); + data.append(-2); + data.append(-1); data.append(1); data.append(0); data.append(-3); + data.append(-2); + data.append(2); + data.append(0); data.append(-3); - data.append(-3); + data.append(-2); + data.append(2); + data.append(-1); + data.append(0); data.append(-3); data.append(1); data.append(0); + data.append(-2); + data.append(0); + data.append(-3); + data.append(-2); data.append(1); data.append(1); data.append(1); - data.append(2); data.append(-1); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(-1); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8/input_1.cairo b/tests/nodes/greater_i8/input_1.cairo index e623f5495..79831e085 100644 --- a/tests/nodes/greater_i8/input_1.cairo +++ b/tests/nodes/greater_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); + data.append(0); data.append(-2); data.append(0); data.append(-1); - data.append(-3); - data.append(0); - data.append(0); data.append(1); - data.append(-3); - data.append(-3); data.append(1); - data.append(2); - data.append(0); - data.append(-3); - data.append(-3); data.append(-1); - data.append(1); data.append(2); - data.append(-2); 
data.append(2); + data.append(1); + data.append(1); data.append(-2); + data.append(1); + data.append(-1); data.append(-2); - data.append(-2); - data.append(-2); + data.append(1); + data.append(-1); data.append(2); - data.append(-2); data.append(1); + data.append(1); + data.append(-1); + data.append(2); + data.append(-1); + data.append(2); + data.append(-3); + data.append(2); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8/output_0.cairo b/tests/nodes/greater_i8/output_0.cairo index 076748be3..3864fbbe4 100644 --- a/tests/nodes/greater_i8/output_0.cairo +++ b/tests/nodes/greater_i8/output_0.cairo @@ -1,41 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(0); data.append(1); - data.append(1); data.append(0); - data.append(1); + data.append(0); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(1); + data.append(0); data.append(0); data.append(0); - data.append(1); data.append(1); data.append(1); data.append(0); data.append(0); - data.append(1); + data.append(0); + data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); - data.append(1); + data.append(0); + data.append(0); data.append(0); data.append(1); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8_broadcast.cairo b/tests/nodes/greater_i8_broadcast.cairo index 57b03f337..c1acc3950 100644 --- a/tests/nodes/greater_i8_broadcast.cairo +++ b/tests/nodes/greater_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I8TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_i8_broadcast/input_0.cairo b/tests/nodes/greater_i8_broadcast/input_0.cairo index 509936b85..7fadef780 100644 --- a/tests/nodes/greater_i8_broadcast/input_0.cairo +++ b/tests/nodes/greater_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(-2); data.append(-3); - data.append(0); data.append(-3); - data.append(-2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8_broadcast/input_1.cairo b/tests/nodes/greater_i8_broadcast/input_1.cairo index 74374511f..2f8cf491d 100644 --- a/tests/nodes/greater_i8_broadcast/input_1.cairo +++ 
b/tests/nodes/greater_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-2); + data.append(-3); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_i8_broadcast/output_0.cairo b/tests/nodes/greater_i8_broadcast/output_0.cairo index 6e83f693f..d2fab9fd0 100644 --- a/tests/nodes/greater_i8_broadcast/output_0.cairo +++ b/tests/nodes/greater_i8_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32.cairo b/tests/nodes/greater_u32.cairo index 71b0af1c1..2eb538434 100644 --- a/tests/nodes/greater_u32.cairo +++ b/tests/nodes/greater_u32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use 
orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_u32/input_0.cairo b/tests/nodes/greater_u32/input_0.cairo index cb20dbd6f..b2ec6068a 100644 --- a/tests/nodes/greater_u32/input_0.cairo +++ b/tests/nodes/greater_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(2); + data.append(4); data.append(3); data.append(3); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - data.append(4); - data.append(1); + data.append(5); data.append(1); data.append(2); + data.append(3); data.append(1); - data.append(1); + data.append(5); + data.append(5); + data.append(5); + data.append(3); + data.append(4); data.append(0); data.append(2); + data.append(2); data.append(4); - data.append(3); - data.append(1); - data.append(3); data.append(1); data.append(2); + data.append(2); data.append(0); - data.append(5); + data.append(1); data.append(3); + data.append(4); + data.append(4); + data.append(3); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32/input_1.cairo b/tests/nodes/greater_u32/input_1.cairo index 7a4ea0c19..2346c043c 100644 --- 
a/tests/nodes/greater_u32/input_1.cairo +++ b/tests/nodes/greater_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(4); data.append(0); data.append(2); + data.append(2); + data.append(4); + data.append(4); + data.append(1); data.append(0); - data.append(3); - data.append(5); data.append(0); - data.append(3); - data.append(3); - data.append(1); + data.append(4); data.append(2); + data.append(2); + data.append(5); data.append(1); - data.append(3); - data.append(1); data.append(4); + data.append(4); + data.append(1); + data.append(5); + data.append(5); data.append(3); - data.append(2); data.append(3); data.append(2); + data.append(4); + data.append(3); data.append(3); - data.append(0); - data.append(0); - data.append(5); data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32/output_0.cairo b/tests/nodes/greater_u32/output_0.cairo index a7a8c7b52..4ff4b7ffe 100644 --- a/tests/nodes/greater_u32/output_0.cairo +++ b/tests/nodes/greater_u32/output_0.cairo @@ -1,24 +1,27 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(1); data.append(0); 
data.append(1); + data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); data.append(0); data.append(1); data.append(0); @@ -29,13 +32,9 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); data.append(0); data.append(1); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32_broadcast.cairo b/tests/nodes/greater_u32_broadcast.cairo index 3c15cded9..febcc1674 100644 --- a/tests/nodes/greater_u32_broadcast.cairo +++ b/tests/nodes/greater_u32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_greater_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.greater(@input_1); + let y = input_0.greater(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/greater_u32_broadcast/input_0.cairo b/tests/nodes/greater_u32_broadcast/input_0.cairo index 44d87a588..972de6b6b 100644 --- a/tests/nodes/greater_u32_broadcast/input_0.cairo +++ b/tests/nodes/greater_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(4); - data.append(3); data.append(4); + data.append(5); + data.append(2); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32_broadcast/input_1.cairo b/tests/nodes/greater_u32_broadcast/input_1.cairo index 291c107db..2831d01cc 100644 --- a/tests/nodes/greater_u32_broadcast/input_1.cairo +++ b/tests/nodes/greater_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(5); + data.append(2); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/greater_u32_broadcast/output_0.cairo b/tests/nodes/greater_u32_broadcast/output_0.cairo index 4818b6836..7e2cee38d 100644 --- a/tests/nodes/greater_u32_broadcast/output_0.cairo +++ b/tests/nodes/greater_u32_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - 
data.append(0); - data.append(0); data.append(1); + data.append(1); + data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo b/tests/nodes/is_inf_fp16x16.cairo similarity index 60% rename from tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo rename to tests/nodes/is_inf_fp16x16.cairo index b91251f21..d09a4a6f8 100644 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_1D.cairo +++ b/tests/nodes/is_inf_fp16x16.cairo @@ -3,18 +3,20 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp16x16_1D() { +fn test_is_inf_fp16x16() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(0, false); + let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo b/tests/nodes/is_inf_fp16x16/input_0.cairo similarity index 63% rename from tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo rename to tests/nodes/is_inf_fp16x16/input_0.cairo index 6a8b7cb09..439f44bf1 100644 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/input_0.cairo +++ b/tests/nodes/is_inf_fp16x16/input_0.cairo @@ -5,13 +5,14 @@ use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); + shape.append(6); let mut data = ArrayTrait::new(); + data.append(FP16x16 { 
mag: 78643, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FixedTrait::NEG_INF()); + data.append(FP16x16 { mag: 183500, sign: false }); + data.append(FixedTrait::POS_INF()); + data.append(FixedTrait::NEG_INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_fp16x16/output_0.cairo b/tests/nodes/is_inf_fp16x16/output_0.cairo new file mode 100644 index 000000000..059edbf71 --- /dev/null +++ b/tests/nodes/is_inf_fp16x16/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo b/tests/nodes/is_inf_fp8x23.cairo similarity index 59% rename from tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo rename to tests/nodes/is_inf_fp8x23.cairo index 9640ee345..a65951dd7 100644 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_1D.cairo +++ b/tests/nodes/is_inf_fp8x23.cairo @@ -3,18 +3,20 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::BoolTensor; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; #[test] #[available_gas(2000000000)] -fn 
test_reduce_sum_single_axis_fp8x23_1D() { +fn test_is_inf_fp8x23() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(0, false); + let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo b/tests/nodes/is_inf_fp8x23/input_0.cairo similarity index 62% rename from tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo rename to tests/nodes/is_inf_fp8x23/input_0.cairo index 0b5fd5c6a..29b4c52e7 100644 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_1D/input_0.cairo +++ b/tests/nodes/is_inf_fp8x23/input_0.cairo @@ -5,11 +5,14 @@ use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); + shape.append(6); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 10066329, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FixedTrait::NEG_INF()); + data.append(FP8x23 { mag: 23488102, sign: false }); + data.append(FixedTrait::POS_INF()); + data.append(FixedTrait::NEG_INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_fp8x23/output_0.cairo b/tests/nodes/is_inf_fp8x23/output_0.cairo new file mode 100644 index 000000000..059edbf71 --- /dev/null +++ b/tests/nodes/is_inf_fp8x23/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + TensorTrait::new(shape.span(), 
data.span()) +} diff --git a/tests/nodes/is_inf_i32.cairo b/tests/nodes/is_inf_i32.cairo index dfa790fd0..3dd3b234d 100644 --- a/tests/nodes/is_inf_i32.cairo +++ b/tests/nodes/is_inf_i32.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_is_inf_i32() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = TensorTrait::is_inf(@input_0, Option::None, Option::None); + let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/is_inf_i32/input_0.cairo b/tests/nodes/is_inf_i32/input_0.cairo index 0bdb9d040..7b44e2c7c 100644 --- a/tests/nodes/is_inf_i32/input_0.cairo +++ b/tests/nodes/is_inf_i32/input_0.cairo @@ -1,7 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::numbers::{NumberTrait}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-1); data.append(0); - data.append(-1); + data.append(NumberTrait::INF()); data.append(8); - data.append(1); - data.append(-1); + data.append(NumberTrait::INF() * -1); + data.append(NumberTrait::INF()); TensorTrait::new(shape.span(), data.span()) } diff 
--git a/tests/nodes/is_inf_i32/output_0.cairo b/tests/nodes/is_inf_i32/output_0.cairo index 05f1fa0a3..059edbf71 100644 --- a/tests/nodes/is_inf_i32/output_0.cairo +++ b/tests/nodes/is_inf_i32/output_0.cairo @@ -1,18 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_i8.cairo b/tests/nodes/is_inf_i8.cairo new file mode 100644 index 000000000..251e01480 --- /dev/null +++ b/tests/nodes/is_inf_i8.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_is_inf_i8() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); + + assert_eq(y, z); +} diff --git a/tests/nodes/is_inf_i8/input_0.cairo b/tests/nodes/is_inf_i8/input_0.cairo new file mode 100644 index 000000000..e9af358c2 --- /dev/null +++ b/tests/nodes/is_inf_i8/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::{NumberTrait}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(0); + data.append(NumberTrait::INF()); + data.append(8); + data.append(NumberTrait::INF() * -1); + data.append(NumberTrait::INF()); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/is_inf_i8/output_0.cairo b/tests/nodes/is_inf_i8/output_0.cairo new file mode 100644 index 000000000..059edbf71 --- /dev/null +++ b/tests/nodes/is_inf_i8/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop.cairo b/tests/nodes/is_inf_u32.cairo similarity index 59% rename from tests/nodes/reduce_sum_empty_axes_input_noop.cairo rename to tests/nodes/is_inf_u32.cairo index 973479855..21cd1a9fc 100644 --- a/tests/nodes/reduce_sum_empty_axes_input_noop.cairo +++ b/tests/nodes/is_inf_u32.cairo @@ -2,19 +2,21 @@ mod input_0; mod output_0; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] 
#[available_gas(2000000000)] -fn test_reduce_sum_empty_axes_input_noop() { +fn test_is_inf_u32() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true)); + let y = TensorTrait::is_inf(@input_0, Option::None, Option::None); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo b/tests/nodes/is_inf_u32/input_0.cairo similarity index 72% rename from tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo rename to tests/nodes/is_inf_u32/input_0.cairo index a350d75c1..a7bc9ea61 100644 --- a/tests/nodes/reduce_sum_single_axis_u32_1D/input_0.cairo +++ b/tests/nodes/is_inf_u32/input_0.cairo @@ -5,11 +5,14 @@ use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); + shape.append(6); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); - data.append(2); + data.append(0); + data.append(NumberTrait::INF()); + data.append(8); + data.append(NumberTrait::INF()); + data.append(NumberTrait::INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_inf_u32/output_0.cairo b/tests/nodes/is_inf_u32/output_0.cairo new file mode 100644 index 000000000..059edbf71 --- /dev/null +++ b/tests/nodes/is_inf_u32/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/is_nan_fp16x16.cairo b/tests/nodes/is_nan_fp16x16.cairo index 70c1085da..4b5de16f4 100644 --- 
a/tests/nodes/is_nan_fp16x16.cairo +++ b/tests/nodes/is_nan_fp16x16.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::BoolTensor; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; #[test] #[available_gas(2000000000)] fn test_is_nan_fp16x16() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = TensorTrait::is_nan(@input_0); + let y = TensorTrait::is_nan(@input_0); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/is_nan_fp16x16/input_0.cairo b/tests/nodes/is_nan_fp16x16/input_0.cairo index 4a28acbb2..8c86af4fb 100644 --- a/tests/nodes/is_nan_fp16x16/input_0.cairo +++ b/tests/nodes/is_nan_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 78643, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FixedTrait::NaN()); data.append(FP16x16 { mag: 183500, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FixedTrait::NaN()); + data.append(FixedTrait::NaN()); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_nan_fp16x16/output_0.cairo b/tests/nodes/is_nan_fp16x16/output_0.cairo index 05f1fa0a3..059edbf71 100644 --- a/tests/nodes/is_nan_fp16x16/output_0.cairo +++ b/tests/nodes/is_nan_fp16x16/output_0.cairo @@ -1,18 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo b/tests/nodes/is_nan_fp8x23.cairo similarity index 51% rename from tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo rename to tests/nodes/is_nan_fp8x23.cairo index 0c4bede22..7f1d9682b 100644 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims.cairo +++ b/tests/nodes/is_nan_fp8x23.cairo @@ -2,19 +2,21 @@ mod input_0; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp8x23_2D_keepdims() { +fn test_is_nan_fp8x23() 
{ let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(0, true); + let y = TensorTrait::is_nan(@input_0); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo b/tests/nodes/is_neg_inf_fp16x16.cairo similarity index 59% rename from tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo rename to tests/nodes/is_neg_inf_fp16x16.cairo index ab1792bf1..99417cdae 100644 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1.cairo +++ b/tests/nodes/is_neg_inf_fp16x16.cairo @@ -3,18 +3,20 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp16x16_2D_axis_1() { +fn test_is_neg_inf_fp16x16() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(1, false); + let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo b/tests/nodes/is_neg_inf_fp16x16/input_0.cairo similarity index 63% rename from tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo rename to tests/nodes/is_neg_inf_fp16x16/input_0.cairo index 33e05815f..3da48092e 100644 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_1D/input_0.cairo +++ b/tests/nodes/is_neg_inf_fp16x16/input_0.cairo @@ -5,11 +5,14 @@ use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { let mut shape = 
ArrayTrait::::new(); - shape.append(3); + shape.append(6); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 1, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FixedTrait::POS_INF()); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FixedTrait::NEG_INF()); + data.append(FixedTrait::POS_INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_neg_inf_fp16x16/output_0.cairo b/tests/nodes/is_neg_inf_fp16x16/output_0.cairo new file mode 100644 index 000000000..0e3c52449 --- /dev/null +++ b/tests/nodes/is_neg_inf_fp16x16/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo b/tests/nodes/is_neg_inf_fp8x23.cairo similarity index 58% rename from tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo rename to tests/nodes/is_neg_inf_fp8x23.cairo index 84c6bf093..fd053abab 100644 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1.cairo +++ b/tests/nodes/is_neg_inf_fp8x23.cairo @@ -3,18 +3,20 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::BoolTensor; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; 
-use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp8x23_2D_axis_1() { +fn test_is_neg_inf_fp8x23() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(1, false); + let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo b/tests/nodes/is_neg_inf_fp8x23/input_0.cairo similarity index 71% rename from tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo rename to tests/nodes/is_neg_inf_fp8x23/input_0.cairo index 61b70cda3..9b60362be 100644 --- a/tests/nodes/reduce_sum_single_axis_i8_1D/input_0.cairo +++ b/tests/nodes/is_neg_inf_fp8x23/input_0.cairo @@ -5,11 +5,14 @@ use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); + shape.append(6); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 1, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); + data.append(FixedTrait::POS_INF()); data.append(FP8x23 { mag: 2, sign: false }); + data.append(FixedTrait::NEG_INF()); + data.append(FixedTrait::POS_INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_neg_inf_fp8x23/output_0.cairo b/tests/nodes/is_neg_inf_fp8x23/output_0.cairo new file mode 100644 index 000000000..0e3c52449 --- /dev/null +++ b/tests/nodes/is_neg_inf_fp8x23/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + 
data.append(true); + data.append(false); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/is_neg_inf_i32.cairo b/tests/nodes/is_neg_inf_i32.cairo index 054c70aed..4cdad3051 100644 --- a/tests/nodes/is_neg_inf_i32.cairo +++ b/tests/nodes/is_neg_inf_i32.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_is_neg_inf_i32() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); + let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/is_neg_inf_i32/input_0.cairo b/tests/nodes/is_neg_inf_i32/input_0.cairo index 0bdb9d040..7b44e2c7c 100644 --- a/tests/nodes/is_neg_inf_i32/input_0.cairo +++ b/tests/nodes/is_neg_inf_i32/input_0.cairo @@ -1,7 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::numbers::{NumberTrait}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-1); data.append(0); - data.append(-1); + data.append(NumberTrait::INF()); data.append(8); - data.append(1); - data.append(-1); + 
data.append(NumberTrait::INF() * -1); + data.append(NumberTrait::INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_neg_inf_i32/output_0.cairo b/tests/nodes/is_neg_inf_i32/output_0.cairo index c687bbb11..0e3c52449 100644 --- a/tests/nodes/is_neg_inf_i32/output_0.cairo +++ b/tests/nodes/is_neg_inf_i32/output_0.cairo @@ -1,18 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_neg_inf_i8.cairo b/tests/nodes/is_neg_inf_i8.cairo new file mode 100644 index 000000000..3bde58b79 --- /dev/null +++ b/tests/nodes/is_neg_inf_i8.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_is_neg_inf_i8() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = TensorTrait::is_inf(@input_0, Option::Some(1), Option::Some(0)); + + assert_eq(y, z); +} diff --git a/tests/nodes/is_neg_inf_i8/input_0.cairo b/tests/nodes/is_neg_inf_i8/input_0.cairo new file mode 100644 index 000000000..e9af358c2 --- /dev/null +++ 
b/tests/nodes/is_neg_inf_i8/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::{NumberTrait}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(0); + data.append(NumberTrait::INF()); + data.append(8); + data.append(NumberTrait::INF() * -1); + data.append(NumberTrait::INF()); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/is_neg_inf_i8/output_0.cairo b/tests/nodes/is_neg_inf_i8/output_0.cairo new file mode 100644 index 000000000..0e3c52449 --- /dev/null +++ b/tests/nodes/is_neg_inf_i8/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo b/tests/nodes/is_pos_inf_fp16x16.cairo similarity index 59% rename from tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo rename to tests/nodes/is_pos_inf_fp16x16.cairo index 0faf8ca88..2d669088c 100644 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default.cairo +++ b/tests/nodes/is_pos_inf_fp16x16.cairo @@ -3,18 +3,20 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::BoolTensor; +use 
orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp16x16_2D_default() { +fn test_is_pos_inf_fp16x16() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(0, false); + let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo b/tests/nodes/is_pos_inf_fp16x16/input_0.cairo similarity index 58% rename from tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo rename to tests/nodes/is_pos_inf_fp16x16/input_0.cairo index 6a8b7cb09..68441d517 100644 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/input_0.cairo +++ b/tests/nodes/is_pos_inf_fp16x16/input_0.cairo @@ -5,13 +5,14 @@ use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); + shape.append(6); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 1, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 4294967295, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 4294967295, sign: true }); + data.append(FP16x16 { mag: 4294967295, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_pos_inf_fp16x16/output_0.cairo b/tests/nodes/is_pos_inf_fp16x16/output_0.cairo new file mode 100644 index 000000000..08be59d64 --- /dev/null +++ b/tests/nodes/is_pos_inf_fp16x16/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo b/tests/nodes/is_pos_inf_fp8x23.cairo similarity index 58% rename from tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo rename to tests/nodes/is_pos_inf_fp8x23.cairo index 5b9eb0fb1..85f70322a 100644 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default.cairo +++ b/tests/nodes/is_pos_inf_fp8x23.cairo @@ -3,18 +3,20 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::BoolTensor; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp8x23_2D_default() { +fn test_is_pos_inf_fp8x23() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.reduce_sum_single_axis(0, false); + let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo b/tests/nodes/is_pos_inf_fp8x23/input_0.cairo similarity index 65% rename from tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo rename to tests/nodes/is_pos_inf_fp8x23/input_0.cairo index 53f6405e1..cc50787fb 100644 --- 
a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/input_0.cairo +++ b/tests/nodes/is_pos_inf_fp8x23/input_0.cairo @@ -5,13 +5,14 @@ use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); + shape.append(6); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 1, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); + data.append(FP8x23 { mag: 4294967295, sign: false }); data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 3, sign: false }); + data.append(FP8x23 { mag: 4294967295, sign: true }); + data.append(FP8x23 { mag: 4294967295, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_pos_inf_fp8x23/output_0.cairo b/tests/nodes/is_pos_inf_fp8x23/output_0.cairo new file mode 100644 index 000000000..08be59d64 --- /dev/null +++ b/tests/nodes/is_pos_inf_fp8x23/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/is_pos_inf_i32.cairo b/tests/nodes/is_pos_inf_i32.cairo index 7227fafc6..35eea6426 100644 --- a/tests/nodes/is_pos_inf_i32.cairo +++ b/tests/nodes/is_pos_inf_i32.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; -use orion::operators::tensor::U32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; 
-use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_is_pos_inf_i32() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); + let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/is_pos_inf_i32/input_0.cairo b/tests/nodes/is_pos_inf_i32/input_0.cairo index 0bdb9d040..7b44e2c7c 100644 --- a/tests/nodes/is_pos_inf_i32/input_0.cairo +++ b/tests/nodes/is_pos_inf_i32/input_0.cairo @@ -1,7 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::numbers::{NumberTrait}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,9 +10,9 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-1); data.append(0); - data.append(-1); + data.append(NumberTrait::INF()); data.append(8); - data.append(1); - data.append(-1); + data.append(NumberTrait::INF() * -1); + data.append(NumberTrait::INF()); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_pos_inf_i32/output_0.cairo b/tests/nodes/is_pos_inf_i32/output_0.cairo index 96c5928ea..08be59d64 100644 --- a/tests/nodes/is_pos_inf_i32/output_0.cairo +++ b/tests/nodes/is_pos_inf_i32/output_0.cairo @@ -1,18 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> 
Tensor { let mut shape = ArrayTrait::::new(); shape.append(6); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/is_pos_inf_i8.cairo b/tests/nodes/is_pos_inf_i8.cairo new file mode 100644 index 000000000..5dc40cce1 --- /dev/null +++ b/tests/nodes/is_pos_inf_i8.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::BoolTensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I8TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_is_pos_inf_i8() { + let input_0 = input_0::input_0(); + let z = output_0::output_0(); + + let y = TensorTrait::is_inf(@input_0, Option::Some(0), Option::Some(1)); + + assert_eq(y, z); +} diff --git a/tests/nodes/is_pos_inf_i8/input_0.cairo b/tests/nodes/is_pos_inf_i8/input_0.cairo new file mode 100644 index 000000000..e9af358c2 --- /dev/null +++ b/tests/nodes/is_pos_inf_i8/input_0.cairo @@ -0,0 +1,18 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::{NumberTrait}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(-1); + data.append(0); + data.append(NumberTrait::INF()); + data.append(8); + data.append(NumberTrait::INF() * -1); + data.append(NumberTrait::INF()); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/is_pos_inf_i8/output_0.cairo 
b/tests/nodes/is_pos_inf_i8/output_0.cairo new file mode 100644 index 000000000..08be59d64 --- /dev/null +++ b/tests/nodes/is_pos_inf_i8/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::BoolTensor; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/less_equal_fp16x16.cairo b/tests/nodes/less_equal_fp16x16.cairo index fc14de86f..19fe42d2d 100644 --- a/tests/nodes/less_equal_fp16x16.cairo +++ b/tests/nodes/less_equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_fp16x16/input_0.cairo b/tests/nodes/less_equal_fp16x16/input_0.cairo index e98ef6e2f..ec0027d22 100644 --- 
a/tests/nodes/less_equal_fp16x16/input_0.cairo +++ b/tests/nodes/less_equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16/input_1.cairo b/tests/nodes/less_equal_fp16x16/input_1.cairo index 8d9a605d9..5cc407dc5 100644 --- a/tests/nodes/less_equal_fp16x16/input_1.cairo +++ b/tests/nodes/less_equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16/output_0.cairo 
b/tests/nodes/less_equal_fp16x16/output_0.cairo index f90460c3d..7e2cee38d 100644 --- a/tests/nodes/less_equal_fp16x16/output_0.cairo +++ b/tests/nodes/less_equal_fp16x16/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); + data.append(1); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_fp16x16_broadcast.cairo b/tests/nodes/less_equal_fp16x16_broadcast.cairo index 27c3101db..6ca29eb78 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + 
assert_eq(y, z); } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo index cf2986fd4..13261de0d 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo index e630cb30c..25a595400 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo index 5614176ce..31cf673d4 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(0); data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23.cairo b/tests/nodes/less_equal_fp8x23.cairo index 7d0aa2e23..3ee472dce 100644 --- a/tests/nodes/less_equal_fp8x23.cairo +++ b/tests/nodes/less_equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = 
input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_fp8x23/input_0.cairo b/tests/nodes/less_equal_fp8x23/input_0.cairo index b51a0b8d0..cac7e356e 100644 --- a/tests/nodes/less_equal_fp8x23/input_0.cairo +++ b/tests/nodes/less_equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23/input_1.cairo b/tests/nodes/less_equal_fp8x23/input_1.cairo index cfb564ed9..6a5e5a086 100644 --- a/tests/nodes/less_equal_fp8x23/input_1.cairo +++ b/tests/nodes/less_equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23/output_0.cairo b/tests/nodes/less_equal_fp8x23/output_0.cairo index 4818b6836..07948a48e 100644 --- a/tests/nodes/less_equal_fp8x23/output_0.cairo +++ b/tests/nodes/less_equal_fp8x23/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(0); data.append(1); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast.cairo b/tests/nodes/less_equal_fp8x23_broadcast.cairo index 7a5fb95cd..8cf36a6ba 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP8x23TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = 
input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo index fc3ed6319..597e948e1 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo index 74307c4f3..6a7c55548 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,6 +10,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff 
--git a/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo index 085034f13..62010885f 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i32.cairo b/tests/nodes/less_equal_i32.cairo index be4c222d1..3072a59b0 100644 --- a/tests/nodes/less_equal_i32.cairo +++ b/tests/nodes/less_equal_i32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_i32/input_0.cairo b/tests/nodes/less_equal_i32/input_0.cairo index 72ef4d47d..11c8e73ff 100644 --- 
a/tests/nodes/less_equal_i32/input_0.cairo +++ b/tests/nodes/less_equal_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); + data.append(-3); data.append(-2); - data.append(-2); - data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32/input_1.cairo b/tests/nodes/less_equal_i32/input_1.cairo index c9ebd1f8a..330426cd7 100644 --- a/tests/nodes/less_equal_i32/input_1.cairo +++ b/tests/nodes/less_equal_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); + data.append(2); data.append(-3); - data.append(-1); - data.append(-3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32/output_0.cairo b/tests/nodes/less_equal_i32/output_0.cairo index 87ee1df2e..8442d0d0c 100644 --- a/tests/nodes/less_equal_i32/output_0.cairo +++ b/tests/nodes/less_equal_i32/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = 
ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); data.append(1); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast.cairo b/tests/nodes/less_equal_i32_broadcast.cairo index 2b3e4f406..3657b38d9 100644 --- a/tests/nodes/less_equal_i32_broadcast.cairo +++ b/tests/nodes/less_equal_i32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_i32_broadcast/input_0.cairo b/tests/nodes/less_equal_i32_broadcast/input_0.cairo index 78aa6b0a0..9f1d44f37 100644 --- a/tests/nodes/less_equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +8,8 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - 
data.append(-1); + data.append(-2); + data.append(1); data.append(-3); data.append(-2); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i32_broadcast/input_1.cairo b/tests/nodes/less_equal_i32_broadcast/input_1.cairo index d810c8dc3..dc3c54f94 100644 --- a/tests/nodes/less_equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); + data.append(-2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast/output_0.cairo b/tests/nodes/less_equal_i32_broadcast/output_0.cairo index b066124bb..31cf673d4 100644 --- a/tests/nodes/less_equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_i32_broadcast/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); + data.append(0); data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i8.cairo b/tests/nodes/less_equal_i8.cairo index 80b1ae428..c86a70ec1 100644 --- a/tests/nodes/less_equal_i8.cairo +++ b/tests/nodes/less_equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use 
orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_i8/input_0.cairo b/tests/nodes/less_equal_i8/input_0.cairo index c4c530cde..4f53a978e 100644 --- a/tests/nodes/less_equal_i8/input_0.cairo +++ b/tests/nodes/less_equal_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(-3); + data.append(-1); data.append(-1); data.append(-2); - data.append(-2); - data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8/input_1.cairo b/tests/nodes/less_equal_i8/input_1.cairo index 184156e2a..6cb982144 100644 --- a/tests/nodes/less_equal_i8/input_1.cairo +++ b/tests/nodes/less_equal_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); + data.append(0); + data.append(-2); data.append(1); - data.append(-1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8/output_0.cairo b/tests/nodes/less_equal_i8/output_0.cairo index eabe7662b..31cf673d4 100644 --- a/tests/nodes/less_equal_i8/output_0.cairo +++ b/tests/nodes/less_equal_i8/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); data.append(1); data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast.cairo b/tests/nodes/less_equal_i8_broadcast.cairo index 33afe5af8..ac53e3aa6 100644 --- a/tests/nodes/less_equal_i8_broadcast.cairo +++ b/tests/nodes/less_equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; 
+use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; #[test] #[available_gas(2000000000)] fn test_less_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_i8_broadcast/input_0.cairo b/tests/nodes/less_equal_i8_broadcast/input_0.cairo index 288365601..835e66354 100644 --- a/tests/nodes/less_equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(2); + data.append(2); + data.append(2); data.append(-3); - data.append(-3); - data.append(-3); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast/input_1.cairo b/tests/nodes/less_equal_i8_broadcast/input_1.cairo index 0f1ad9249..02ff8facd 100644 --- a/tests/nodes/less_equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { 
shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(2); + data.append(-3); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast/output_0.cairo b/tests/nodes/less_equal_i8_broadcast/output_0.cairo index 0367c57b6..9a2391c78 100644 --- a/tests/nodes/less_equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_i8_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32.cairo b/tests/nodes/less_equal_u32.cairo index acc3ac6e5..8a1e7aab4 100644 --- a/tests/nodes/less_equal_u32.cairo +++ b/tests/nodes/less_equal_u32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = 
input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_u32/input_0.cairo b/tests/nodes/less_equal_u32/input_0.cairo index d8497931b..84b61d7cc 100644 --- a/tests/nodes/less_equal_u32/input_0.cairo +++ b/tests/nodes/less_equal_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(5); data.append(1); + data.append(2); + data.append(5); data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32/input_1.cairo b/tests/nodes/less_equal_u32/input_1.cairo index 652fdcffd..fe6539464 100644 --- a/tests/nodes/less_equal_u32/input_1.cairo +++ b/tests/nodes/less_equal_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(2); + data.append(4); data.append(5); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32/output_0.cairo b/tests/nodes/less_equal_u32/output_0.cairo index 085034f13..de313d890 100644 --- a/tests/nodes/less_equal_u32/output_0.cairo +++ b/tests/nodes/less_equal_u32/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast.cairo b/tests/nodes/less_equal_u32_broadcast.cairo index e5d6b43ab..dc695687d 100644 --- a/tests/nodes/less_equal_u32_broadcast.cairo +++ b/tests/nodes/less_equal_u32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_less_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less_equal(@input_1); + let y = input_0.less_equal(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_equal_u32_broadcast/input_0.cairo b/tests/nodes/less_equal_u32_broadcast/input_0.cairo index b6a6ddbcf..a6bf00a7c 100644 --- a/tests/nodes/less_equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); data.append(0); - data.append(2); + data.append(5); + data.append(4); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast/input_1.cairo b/tests/nodes/less_equal_u32_broadcast/input_1.cairo index 591525d47..8e7328b81 100644 --- a/tests/nodes/less_equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast/output_0.cairo b/tests/nodes/less_equal_u32_broadcast/output_0.cairo index 897d076d9..ef770fa07 100644 --- a/tests/nodes/less_equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_u32_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(1); - data.append(1); 
data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16.cairo b/tests/nodes/less_fp16x16.cairo index ed163c56f..04ac88b63 100644 --- a/tests/nodes/less_fp16x16.cairo +++ b/tests/nodes/less_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_fp16x16/input_0.cairo b/tests/nodes/less_fp16x16/input_0.cairo index 9082c0ec3..41fa7524d 100644 --- a/tests/nodes/less_fp16x16/input_0.cairo +++ b/tests/nodes/less_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: 
false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/input_1.cairo b/tests/nodes/less_fp16x16/input_1.cairo index bc42d9df3..fe0e56e41 100644 --- a/tests/nodes/less_fp16x16/input_1.cairo +++ b/tests/nodes/less_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/output_0.cairo b/tests/nodes/less_fp16x16/output_0.cairo index 1b19d050d..ff7a8e63d 100644 --- a/tests/nodes/less_fp16x16/output_0.cairo +++ b/tests/nodes/less_fp16x16/output_0.cairo @@ -1,39 +1,38 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(1); + data.append(1); + data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(0); + data.append(1); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(1); data.append(0); - data.append(0); - data.append(0); - data.append(0); data.append(1); + data.append(0); data.append(1); 
data.append(0); data.append(0); diff --git a/tests/nodes/less_fp16x16_broadcast.cairo b/tests/nodes/less_fp16x16_broadcast.cairo index 2866d6378..787c07448 100644 --- a/tests/nodes/less_fp16x16_broadcast.cairo +++ b/tests/nodes/less_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_fp16x16_broadcast/input_0.cairo index bc61eeea2..18782c0dd 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); + 
data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 
{ mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_fp16x16_broadcast/input_1.cairo index 303d93806..743355c3d 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -12,6 +12,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_fp16x16_broadcast/output_0.cairo index 6f6745c19..7d4613a88 100644 --- a/tests/nodes/less_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,17 +10,17 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(1); data.append(0); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); - data.append(1); + data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(0); + data.append(1); 
data.append(1); data.append(1); data.append(1); @@ -30,12 +29,12 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(0); - data.append(1); data.append(1); data.append(1); data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23.cairo b/tests/nodes/less_fp8x23.cairo index 6a6b5e97e..6fe7b08b8 100644 --- a/tests/nodes/less_fp8x23.cairo +++ b/tests/nodes/less_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_fp8x23/input_0.cairo b/tests/nodes/less_fp8x23/input_0.cairo index 4016e2d82..fbcd9f2a8 100644 --- a/tests/nodes/less_fp8x23/input_0.cairo +++ b/tests/nodes/less_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, 
FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - 
data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/input_1.cairo b/tests/nodes/less_fp8x23/input_1.cairo index 9a441b427..e27ba84da 100644 --- a/tests/nodes/less_fp8x23/input_1.cairo +++ b/tests/nodes/less_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - 
data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/output_0.cairo b/tests/nodes/less_fp8x23/output_0.cairo index 744c7f5a4..33906ca90 100644 --- a/tests/nodes/less_fp8x23/output_0.cairo +++ b/tests/nodes/less_fp8x23/output_0.cairo @@ -1,40 +1,39 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(1); data.append(1); - data.append(1); - data.append(1); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); + data.append(0); + data.append(1); data.append(0); 
data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); - data.append(1); data.append(0); data.append(1); data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_fp8x23_broadcast.cairo b/tests/nodes/less_fp8x23_broadcast.cairo index 5423670f7..e8b3155c5 100644 --- a/tests/nodes/less_fp8x23_broadcast.cairo +++ b/tests/nodes/less_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_fp8x23_broadcast/input_0.cairo index 5c0be3f96..88ad7277b 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, 
FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + 
data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_fp8x23_broadcast/input_1.cairo index c1b783165..29b68e7c3 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_fp8x23_broadcast/output_0.cairo index b926f44e6..fbf242193 100644 --- a/tests/nodes/less_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/output_0.cairo @@ -1,41 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(0); data.append(0); data.append(0); data.append(0); 
data.append(0); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); data.append(1); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32.cairo b/tests/nodes/less_i32.cairo index a2eaffdd4..4a251b995 100644 --- a/tests/nodes/less_i32.cairo +++ b/tests/nodes/less_i32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_i32/input_0.cairo b/tests/nodes/less_i32/input_0.cairo index 2797bb47f..ab59d73f2 100644 --- a/tests/nodes/less_i32/input_0.cairo +++ b/tests/nodes/less_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, 
I32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); + data.append(-1); data.append(-2); - data.append(1); + data.append(2); + data.append(2); data.append(2); data.append(-2); data.append(1); - data.append(-3); - data.append(2); data.append(1); - data.append(-1); + data.append(1); + data.append(0); data.append(-2); - data.append(-1); + data.append(-3); data.append(-1); data.append(-1); data.append(-2); data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(1); + data.append(-2); data.append(-1); data.append(-3); - data.append(-1); data.append(0); data.append(0); + data.append(-3); + data.append(-3); + data.append(-2); + data.append(2); + data.append(-3); data.append(0); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/input_1.cairo b/tests/nodes/less_i32/input_1.cairo index 69ac67e22..cadacc785 100644 --- a/tests/nodes/less_i32/input_1.cairo +++ b/tests/nodes/less_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); data.append(0); - data.append(-2); - data.append(-1); - data.append(2); data.append(-1); - data.append(-2); data.append(0); - data.append(-3); + data.append(-2); + data.append(2); data.append(1); data.append(-1); + data.append(-3); + data.append(-3); data.append(-2); data.append(-2); - data.append(0); - data.append(0); + data.append(2); data.append(-1); - data.append(-2); + data.append(-3); data.append(2); data.append(1); data.append(-2); - 
data.append(2); data.append(-1); - data.append(2); data.append(-2); - data.append(-2); - data.append(-3); + data.append(1); data.append(-3); + data.append(-1); + data.append(0); + data.append(1); + data.append(0); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/output_0.cairo b/tests/nodes/less_i32/output_0.cairo index eb28d8666..bff093e8b 100644 --- a/tests/nodes/less_i32/output_0.cairo +++ b/tests/nodes/less_i32/output_0.cairo @@ -1,41 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(1); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); - data.append(1); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); + data.append(0); data.append(1); data.append(1); data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(0); - data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast.cairo b/tests/nodes/less_i32_broadcast.cairo index b71e77968..552150976 100644 --- a/tests/nodes/less_i32_broadcast.cairo +++ b/tests/nodes/less_i32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use 
orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_i32_broadcast/input_0.cairo b/tests/nodes/less_i32_broadcast/input_0.cairo index 7029ddf16..e49eaf809 100644 --- a/tests/nodes/less_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-3); data.append(-2); + data.append(1); + data.append(-3); data.append(2); + data.append(-1); + data.append(-3); data.append(0); + data.append(2); + data.append(1); data.append(-1); + data.append(0); data.append(1); data.append(1); - data.append(2); data.append(0); - data.append(0); - data.append(-1); + data.append(1); data.append(-3); - data.append(-2); + data.append(2); data.append(1); - data.append(-2); + data.append(2); data.append(0); - data.append(-1); - data.append(-1); data.append(0); - data.append(2); - data.append(2); data.append(0); - data.append(-1); - data.append(1); + data.append(-3); + data.append(0); + data.append(2); data.append(-2); - 
data.append(1); - data.append(-1); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/input_1.cairo b/tests/nodes/less_i32_broadcast/input_1.cairo index 426461d10..99c50d44f 100644 --- a/tests/nodes/less_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,7 +10,7 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); - data.append(1); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/output_0.cairo b/tests/nodes/less_i32_broadcast/output_0.cairo index 45e698fa6..79967537c 100644 --- a/tests/nodes/less_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_i32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -15,27 +14,27 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(1); data.append(0); + data.append(0); + data.append(0); + data.append(0); + 
data.append(0); + data.append(0); + data.append(1); data.append(1); data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8.cairo b/tests/nodes/less_i8.cairo index 7b569982f..085a6da35 100644 --- a/tests/nodes/less_i8.cairo +++ b/tests/nodes/less_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_i8/input_0.cairo b/tests/nodes/less_i8/input_0.cairo index e03f2cf94..28dd5a905 100644 --- a/tests/nodes/less_i8/input_0.cairo +++ b/tests/nodes/less_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(1); - data.append(1); 
+ data.append(-3); data.append(0); data.append(-3); - data.append(-1); + data.append(-2); data.append(-1); data.append(2); + data.append(-1); data.append(-3); + data.append(-1); data.append(-3); data.append(-3); - data.append(2); - data.append(-2); - data.append(0); data.append(-1); - data.append(2); - data.append(-3); - data.append(2); data.append(-1); - data.append(0); data.append(-2); - data.append(-1); - data.append(-1); + data.append(-2); data.append(-3); + data.append(-1); + data.append(1); data.append(-2); + data.append(0); + data.append(-3); + data.append(2); data.append(-3); data.append(-2); + data.append(2); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/input_1.cairo b/tests/nodes/less_i8/input_1.cairo index 64effdde2..9dc4e7a6c 100644 --- a/tests/nodes/less_i8/input_1.cairo +++ b/tests/nodes/less_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(0); + data.append(-3); data.append(1); - data.append(0); - data.append(0); + data.append(-2); + data.append(-3); + data.append(2); + data.append(-1); data.append(0); data.append(-1); data.append(-2); - data.append(-2); - data.append(1); - data.append(-2); data.append(1); data.append(0); - data.append(-2); - data.append(-3); - data.append(-1); - data.append(-3); + data.append(2); data.append(-2); data.append(1); - data.append(1); - data.append(-1); data.append(-2); + data.append(-3); + data.append(2); + data.append(0); data.append(-1); - data.append(-1); + data.append(0); + data.append(0); data.append(-2); + data.append(2); 
data.append(-1); + data.append(2); + data.append(0); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/output_0.cairo b/tests/nodes/less_i8/output_0.cairo index 4e7322ca4..9398cc8d3 100644 --- a/tests/nodes/less_i8/output_0.cairo +++ b/tests/nodes/less_i8/output_0.cairo @@ -1,20 +1,20 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); data.append(0); data.append(1); data.append(1); + data.append(0); + data.append(1); + data.append(0); data.append(1); data.append(1); data.append(0); @@ -25,17 +25,16 @@ fn output_0() -> Tensor { data.append(1); data.append(0); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(0); + data.append(1); data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast.cairo b/tests/nodes/less_i8_broadcast.cairo index e15d4b262..fb705a81d 100644 --- a/tests/nodes/less_i8_broadcast.cairo +++ b/tests/nodes/less_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I8TensorPartialEq; 
+use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_i8_broadcast/input_0.cairo b/tests/nodes/less_i8_broadcast/input_0.cairo index 376342d8b..eac6c02fd 100644 --- a/tests/nodes/less_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); - data.append(-1); + data.append(-2); + data.append(-2); data.append(1); - data.append(0); data.append(1); + data.append(0); + data.append(0); data.append(-1); - data.append(-1); - data.append(-2); + data.append(-3); + data.append(0); data.append(2); - data.append(-1); data.append(-3); + data.append(1); + data.append(1); + data.append(2); data.append(-3); data.append(2); - data.append(0); - data.append(0); data.append(-3); - data.append(-2); data.append(2); - data.append(-2); - data.append(0); data.append(1); + data.append(0); + data.append(-1); + data.append(0); data.append(-1); data.append(1); - data.append(2); - data.append(-2); - data.append(-2); + data.append(0); data.append(-2); + data.append(2); TensorTrait::new(shape.span(), 
data.span()) } diff --git a/tests/nodes/less_i8_broadcast/input_1.cairo b/tests/nodes/less_i8_broadcast/input_1.cairo index 98033a6e9..d593d06e4 100644 --- a/tests/nodes/less_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(-1); data.append(-2); - data.append(-2); - data.append(1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/output_0.cairo b/tests/nodes/less_i8_broadcast/output_0.cairo index 878a9be68..f68e15ea8 100644 --- a/tests/nodes/less_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_i8_broadcast/output_0.cairo @@ -1,32 +1,29 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); + data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); @@ -34,8 +31,10 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); + 
data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32.cairo b/tests/nodes/less_u32.cairo index 29b222561..412895527 100644 --- a/tests/nodes/less_u32.cairo +++ b/tests/nodes/less_u32.cairo @@ -3,12 +3,10 @@ mod input_1; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::U32TensorPartialEq; #[test] @@ -16,9 +14,9 @@ use orion::operators::tensor::U32TensorPartialEq; fn test_less_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_u32/input_0.cairo b/tests/nodes/less_u32/input_0.cairo index b424b92ec..15b2924f3 100644 --- a/tests/nodes/less_u32/input_0.cairo +++ b/tests/nodes/less_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(4); + data.append(0); data.append(2); - data.append(4); - data.append(4); - data.append(1); data.append(0); - data.append(1); - data.append(1); - 
data.append(1); + data.append(4); data.append(5); data.append(0); - data.append(0); + data.append(4); + data.append(4); + data.append(1); data.append(5); data.append(3); data.append(3); - data.append(4); + data.append(2); data.append(5); - data.append(4); data.append(5); + data.append(4); data.append(1); - data.append(2); - data.append(5); - data.append(3); - data.append(0); data.append(1); + data.append(4); + data.append(4); + data.append(2); + data.append(2); data.append(2); + data.append(4); + data.append(4); + data.append(3); data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/input_1.cairo b/tests/nodes/less_u32/input_1.cairo index 06e6ceb5f..e540f7f40 100644 --- a/tests/nodes/less_u32/input_1.cairo +++ b/tests/nodes/less_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(3); - data.append(4); - data.append(2); data.append(0); + data.append(1); + data.append(5); + data.append(5); + data.append(2); + data.append(1); + data.append(1); data.append(2); data.append(1); - data.append(4); - data.append(3); - data.append(3); data.append(3); data.append(3); - data.append(4); - data.append(5); - data.append(5); + data.append(0); data.append(5); + data.append(2); + data.append(0); + data.append(0); data.append(0); data.append(4); data.append(1); - data.append(5); data.append(1); data.append(5); + data.append(4); + data.append(4); + data.append(2); data.append(3); + data.append(2); data.append(5); - data.append(0); - data.append(1); - data.append(5); - data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_u32/output_0.cairo b/tests/nodes/less_u32/output_0.cairo index 0049de57d..75a278131 100644 --- a/tests/nodes/less_u32/output_0.cairo +++ b/tests/nodes/less_u32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,20 +10,12 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(0); data.append(0); data.append(1); data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); - data.append(1); data.append(0); - data.append(1); - data.append(1); data.append(0); data.append(0); data.append(0); @@ -32,10 +23,18 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); + data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast.cairo b/tests/nodes/less_u32_broadcast.cairo index cd2df7282..9a7ac7a22 100644 --- a/tests/nodes/less_u32_broadcast.cairo +++ b/tests/nodes/less_u32_broadcast.cairo @@ -3,12 +3,10 @@ mod input_1; mod output_0; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, 
U32TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::U32TensorPartialEq; #[test] @@ -16,9 +14,9 @@ use orion::operators::tensor::U32TensorPartialEq; fn test_less_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.less(@input_1); + let y = input_0.less(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/less_u32_broadcast/input_0.cairo b/tests/nodes/less_u32_broadcast/input_0.cairo index df8b0c90c..655814fc8 100644 --- a/tests/nodes/less_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(4); + data.append(5); data.append(5); data.append(3); data.append(4); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(5); data.append(4); - data.append(3); + data.append(5); data.append(3); data.append(0); + data.append(5); data.append(3); - data.append(0); data.append(3); - data.append(5); - data.append(1); + data.append(0); data.append(4); + data.append(1); data.append(5); + data.append(0); + data.append(2); data.append(2); data.append(1); data.append(3); + data.append(5); + data.append(5); + data.append(5); + data.append(4); + data.append(5); + data.append(5); data.append(2); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/input_1.cairo b/tests/nodes/less_u32_broadcast/input_1.cairo index 790699956..bcb20d101 100644 
--- a/tests/nodes/less_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(2); - data.append(5); data.append(3); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/output_0.cairo b/tests/nodes/less_u32_broadcast/output_0.cairo index 396cb18f5..ad7acc0af 100644 --- a/tests/nodes/less_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_u32_broadcast/output_0.cairo @@ -1,41 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); data.append(0); - data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); - data.append(1); - data.append(1); + data.append(0); data.append(0); data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + 
data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/not_bool.cairo b/tests/nodes/not_bool.cairo index d5b30a089..cc73e1cd4 100644 --- a/tests/nodes/not_bool.cairo +++ b/tests/nodes/not_bool.cairo @@ -3,18 +3,18 @@ mod output_0; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::TensorTrait; use orion::operators::tensor::BoolTensor; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::utils::assert_eq; #[test] #[available_gas(2000000000)] fn test_not_bool() { let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.not(); + let y = input_0.not(); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/not_bool/input_0.cairo b/tests/nodes/not_bool/input_0.cairo index 17f34d5de..eef582ae5 100644 --- a/tests/nodes/not_bool/input_0.cairo +++ b/tests/nodes/not_bool/input_0.cairo @@ -3,7 +3,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensor; fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); + let mut shape = ArrayTrait::new(); shape.append(1); shape.append(1); diff --git a/tests/nodes/not_bool/output_0.cairo b/tests/nodes/not_bool/output_0.cairo index 8b59aea96..43bb7750d 100644 --- a/tests/nodes/not_bool/output_0.cairo +++ b/tests/nodes/not_bool/output_0.cairo @@ -3,7 +3,7 @@ use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::BoolTensor; fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); + let mut shape = ArrayTrait::new(); shape.append(1); shape.append(1); diff --git a/tests/nodes/or_fp16x16.cairo b/tests/nodes/or_fp16x16.cairo index bd23ac7c4..6bc72e320 100644 --- a/tests/nodes/or_fp16x16.cairo +++ b/tests/nodes/or_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod 
output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_or_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_fp16x16/input_0.cairo b/tests/nodes/or_fp16x16/input_0.cairo index d834a7f97..76289ddcc 100644 --- a/tests/nodes/or_fp16x16/input_0.cairo +++ b/tests/nodes/or_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); 
data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16/input_1.cairo b/tests/nodes/or_fp16x16/input_1.cairo index 397241eee..16f8a9735 100644 --- a/tests/nodes/or_fp16x16/input_1.cairo +++ b/tests/nodes/or_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true 
}); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16/output_0.cairo b/tests/nodes/or_fp16x16/output_0.cairo index 77eb5ca09..07754b1eb 100644 --- a/tests/nodes/or_fp16x16/output_0.cairo +++ b/tests/nodes/or_fp16x16/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -18,7 +17,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_fp16x16_broadcast.cairo b/tests/nodes/or_fp16x16_broadcast.cairo index 2a1a6d05a..008614487 100644 --- a/tests/nodes/or_fp16x16_broadcast.cairo +++ b/tests/nodes/or_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; -use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; #[test] #[available_gas(2000000000)] fn test_or_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_fp16x16_broadcast/input_0.cairo b/tests/nodes/or_fp16x16_broadcast/input_0.cairo index 97aba6ffa..cea1413f2 100644 --- a/tests/nodes/or_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/or_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16_broadcast/input_1.cairo b/tests/nodes/or_fp16x16_broadcast/input_1.cairo index 7e68b68a9..3d0d0a4bd 100644 --- a/tests/nodes/or_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/or_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); 
let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp16x16_broadcast/output_0.cairo b/tests/nodes/or_fp16x16_broadcast/output_0.cairo index 0367c57b6..2041bdce3 100644 --- a/tests/nodes/or_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/or_fp16x16_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_fp8x23.cairo b/tests/nodes/or_fp8x23.cairo index a946bc909..fbd0d02c3 100644 --- a/tests/nodes/or_fp8x23.cairo +++ b/tests/nodes/or_fp8x23.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = 
input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_fp8x23/input_0.cairo b/tests/nodes/or_fp8x23/input_0.cairo index eac60eec6..e5c3797ea 100644 --- a/tests/nodes/or_fp8x23/input_0.cairo +++ b/tests/nodes/or_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 
{ mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23/input_1.cairo b/tests/nodes/or_fp8x23/input_1.cairo index 7395c8efc..225d49dcd 100644 --- a/tests/nodes/or_fp8x23/input_1.cairo +++ b/tests/nodes/or_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -11,31 +11,31 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); + 
data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23/output_0.cairo b/tests/nodes/or_fp8x23/output_0.cairo index e391d000f..47d67b111 100644 --- a/tests/nodes/or_fp8x23/output_0.cairo +++ b/tests/nodes/or_fp8x23/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -25,7 +24,7 @@ fn 
output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_fp8x23_broadcast.cairo b/tests/nodes/or_fp8x23_broadcast.cairo index 0cd4ea567..a286666c2 100644 --- a/tests/nodes/or_fp8x23_broadcast.cairo +++ b/tests/nodes/or_fp8x23_broadcast.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_fp8x23_broadcast/input_0.cairo b/tests/nodes/or_fp8x23_broadcast/input_0.cairo index 247e51688..5c6660a18 100644 --- a/tests/nodes/or_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/or_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { 
shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23_broadcast/input_1.cairo b/tests/nodes/or_fp8x23_broadcast/input_1.cairo index 54e53e26a..adadfb8cb 100644 --- a/tests/nodes/or_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/or_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_fp8x23_broadcast/output_0.cairo b/tests/nodes/or_fp8x23_broadcast/output_0.cairo index 0367c57b6..2041bdce3 100644 --- a/tests/nodes/or_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/or_fp8x23_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); 
diff --git a/tests/nodes/or_i32.cairo b/tests/nodes/or_i32.cairo index ce7f05023..e2aa6a99d 100644 --- a/tests/nodes/or_i32.cairo +++ b/tests/nodes/or_i32.cairo @@ -3,10 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorMul}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -14,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_i32/input_0.cairo b/tests/nodes/or_i32/input_0.cairo index 4f63cdac0..45a1a532b 100644 --- a/tests/nodes/or_i32/input_0.cairo +++ b/tests/nodes/or_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorMul}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); data.append(-3); - data.append(2); + data.append(0); data.append(-2); data.append(1); - data.append(0); data.append(-3); - data.append(0); - data.append(-1); data.append(1); data.append(1); - data.append(-1); data.append(-3); data.append(1); - data.append(-3); + data.append(1); + data.append(0); data.append(-2); data.append(-3); + data.append(1); + 
data.append(0); + data.append(-1); + data.append(-1); data.append(-3); - data.append(2); data.append(-2); + data.append(-1); + data.append(-1); data.append(-2); data.append(1); - data.append(-2); - data.append(0); data.append(2); - data.append(1); - data.append(0); + data.append(-1); + data.append(-1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32/input_1.cairo b/tests/nodes/or_i32/input_1.cairo index dc6bfe1ef..016db0a53 100644 --- a/tests/nodes/or_i32/input_1.cairo +++ b/tests/nodes/or_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorMul}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-1); + data.append(0); + data.append(-1); data.append(-2); - data.append(-3); data.append(1); - data.append(2); - data.append(-2); - data.append(-2); + data.append(0); data.append(-1); data.append(1); - data.append(2); - data.append(-2); data.append(-3); - data.append(0); - data.append(1); data.append(-2); data.append(-3); - data.append(2); + data.append(-1); data.append(-3); - data.append(1); - data.append(0); - data.append(-2); - data.append(0); data.append(-3); data.append(2); data.append(-1); - data.append(1); + data.append(0); + data.append(0); + data.append(-2); + data.append(-2); data.append(2); + data.append(-2); + data.append(-2); + data.append(-1); + data.append(-3); + data.append(-3); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32/output_0.cairo b/tests/nodes/or_i32/output_0.cairo index 77eb5ca09..95194b767 100644 --- a/tests/nodes/or_i32/output_0.cairo +++ b/tests/nodes/or_i32/output_0.cairo @@ -1,9 +1,8 @@ use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,7 +10,7 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_i32_broadcast.cairo b/tests/nodes/or_i32_broadcast.cairo index f2265d497..a8a5caee8 100644 --- a/tests/nodes/or_i32_broadcast.cairo +++ b/tests/nodes/or_i32_broadcast.cairo @@ -3,10 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorMul}; use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -14,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_i32_broadcast/input_0.cairo b/tests/nodes/or_i32_broadcast/input_0.cairo index 224a20b1e..eded6f98f 100644 --- a/tests/nodes/or_i32_broadcast/input_0.cairo +++ b/tests/nodes/or_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, 
I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorMul}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,8 +9,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); - data.append(-3); + data.append(1); data.append(2); - data.append(0); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32_broadcast/input_1.cairo b/tests/nodes/or_i32_broadcast/input_1.cairo index 8b33cf367..09e58e0d8 100644 --- a/tests/nodes/or_i32_broadcast/input_1.cairo +++ b/tests/nodes/or_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorMul}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); + data.append(-3); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i32_broadcast/output_0.cairo b/tests/nodes/or_i32_broadcast/output_0.cairo index 0367c57b6..2041bdce3 100644 --- a/tests/nodes/or_i32_broadcast/output_0.cairo +++ b/tests/nodes/or_i32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_i8.cairo b/tests/nodes/or_i8.cairo index bc8e26c0e..b97720435 100644 --- a/tests/nodes/or_i8.cairo +++ b/tests/nodes/or_i8.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorMul}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_i8/input_0.cairo b/tests/nodes/or_i8/input_0.cairo index 28d5be328..2e3cfaefb 100644 --- a/tests/nodes/or_i8/input_0.cairo +++ b/tests/nodes/or_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorMul}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-3); - data.append(-3); - data.append(0); - data.append(2); - data.append(-2); data.append(0); - data.append(0); - data.append(2); + data.append(-1); data.append(1); + data.append(2); data.append(1); data.append(2); data.append(-1); + data.append(2); + data.append(1); + data.append(1); data.append(0); - data.append(0); + data.append(2); data.append(-2); - data.append(-3); data.append(1); - data.append(-2); data.append(0); - data.append(-1); 
data.append(1); - data.append(-2); - data.append(-2); + data.append(0); + data.append(2); + data.append(0); + data.append(2); + data.append(0); + data.append(2); data.append(1); - data.append(-1); data.append(2); + data.append(1); + data.append(-3); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8/input_1.cairo b/tests/nodes/or_i8/input_1.cairo index 4810b255f..fa6f014b9 100644 --- a/tests/nodes/or_i8/input_1.cairo +++ b/tests/nodes/or_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorMul}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-3); data.append(-2); - data.append(1); - data.append(0); data.append(-1); + data.append(-1); + data.append(1); data.append(0); + data.append(1); data.append(-3); - data.append(-2); data.append(1); data.append(-1); - data.append(1); + data.append(0); data.append(-1); + data.append(0); + data.append(-3); + data.append(2); + data.append(-2); data.append(-2); - data.append(1); + data.append(2); + data.append(2); + data.append(2); data.append(2); data.append(-1); - data.append(-1); - data.append(-3); + data.append(2); data.append(-3); - data.append(1); - data.append(-2); - data.append(1); data.append(-1); data.append(-1); data.append(-3); - data.append(2); - data.append(2); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8/output_0.cairo b/tests/nodes/or_i8/output_0.cairo index 77eb5ca09..8e18a844b 100644 --- a/tests/nodes/or_i8/output_0.cairo +++ b/tests/nodes/or_i8/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -20,7 +19,7 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(1); + data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_i8_broadcast.cairo b/tests/nodes/or_i8_broadcast.cairo index 4abc3dda7..a2d044fc7 100644 --- a/tests/nodes/or_i8_broadcast.cairo +++ b/tests/nodes/or_i8_broadcast.cairo @@ -3,12 +3,12 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorMul}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; #[test] @@ -16,9 +16,9 @@ use orion::utils::{assert_eq, assert_seq_eq}; fn test_or_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_i8_broadcast/input_0.cairo b/tests/nodes/or_i8_broadcast/input_0.cairo index 2f89a2efc..72331f26f 100644 --- a/tests/nodes/or_i8_broadcast/input_0.cairo +++ b/tests/nodes/or_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, 
I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorMul}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(-1); + data.append(-2); data.append(2); - data.append(2); - data.append(0); - data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8_broadcast/input_1.cairo b/tests/nodes/or_i8_broadcast/input_1.cairo index 0c38cf889..d45d6dc9e 100644 --- a/tests/nodes/or_i8_broadcast/input_1.cairo +++ b/tests/nodes/or_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorMul}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-1); data.append(-3); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_i8_broadcast/output_0.cairo b/tests/nodes/or_i8_broadcast/output_0.cairo index 0367c57b6..2041bdce3 100644 --- a/tests/nodes/or_i8_broadcast/output_0.cairo +++ b/tests/nodes/or_i8_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/or_u32.cairo b/tests/nodes/or_u32.cairo index 9cff3f789..9d3007f49 100644 --- a/tests/nodes/or_u32.cairo +++ b/tests/nodes/or_u32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; 
-use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_or_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_u32/input_0.cairo b/tests/nodes/or_u32/input_0.cairo index 2b85e47b8..255e0877e 100644 --- a/tests/nodes/or_u32/input_0.cairo +++ b/tests/nodes/or_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); data.append(5); - data.append(4); - data.append(4); data.append(5); data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); data.append(5); + data.append(4); + data.append(0); data.append(5); data.append(4); + data.append(2); data.append(3); data.append(1); data.append(0); - data.append(2); - data.append(3); data.append(1); - data.append(3); - data.append(4); data.append(5); + data.append(0); data.append(5); + data.append(2); data.append(3); data.append(0); data.append(0); - data.append(2); data.append(5); - data.append(4); 
- data.append(1); data.append(5); - data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32/input_1.cairo b/tests/nodes/or_u32/input_1.cairo index 77b154db3..53e2c0f87 100644 --- a/tests/nodes/or_u32/input_1.cairo +++ b/tests/nodes/or_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(5); data.append(4); - data.append(0); - data.append(5); - data.append(1); data.append(1); data.append(5); - data.append(4); + data.append(3); + data.append(3); + data.append(0); data.append(0); data.append(4); + data.append(4); + data.append(1); + data.append(1); + data.append(1); data.append(5); - data.append(0); data.append(3); data.append(2); + data.append(2); + data.append(5); data.append(4); data.append(3); - data.append(5); - data.append(2); - data.append(0); data.append(4); - data.append(0); - data.append(0); data.append(2); + data.append(4); + data.append(5); + data.append(4); data.append(2); + data.append(1); data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32/output_0.cairo b/tests/nodes/or_u32/output_0.cairo index 221f32074..70e2b10b7 100644 --- a/tests/nodes/or_u32/output_0.cairo +++ b/tests/nodes/or_u32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); 
shape.append(3); shape.append(3); @@ -14,13 +13,14 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); + data.append(1); + data.append(0); data.append(0); data.append(1); data.append(1); data.append(1); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); @@ -30,7 +30,6 @@ fn output_0() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(0); data.append(1); data.append(1); data.append(1); diff --git a/tests/nodes/or_u32_broadcast.cairo b/tests/nodes/or_u32_broadcast.cairo index 13094f778..aa697b02d 100644 --- a/tests/nodes/or_u32_broadcast.cairo +++ b/tests/nodes/or_u32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::utils::{assert_eq, assert_seq_eq}; #[test] #[available_gas(2000000000)] fn test_or_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.or(@input_1); + let y = input_0.or(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/or_u32_broadcast/input_0.cairo b/tests/nodes/or_u32_broadcast/input_0.cairo index dd8ecf366..888c3fd7b 100644 --- a/tests/nodes/or_u32_broadcast/input_0.cairo +++ b/tests/nodes/or_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use 
orion::operators::tensor::{U32Tensor, U32TensorMul}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(5); - data.append(1); - data.append(0); + data.append(3); data.append(2); + data.append(2); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32_broadcast/input_1.cairo b/tests/nodes/or_u32_broadcast/input_1.cairo index 7ee1bbea4..acdc8015e 100644 --- a/tests/nodes/or_u32_broadcast/input_1.cairo +++ b/tests/nodes/or_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(3); - data.append(2); + data.append(0); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/or_u32_broadcast/output_0.cairo b/tests/nodes/or_u32_broadcast/output_0.cairo index 0367c57b6..2041bdce3 100644 --- a/tests/nodes/or_u32_broadcast/output_0.cairo +++ b/tests/nodes/or_u32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorMul}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/reduce_sum_default_axes_keepdims.cairo b/tests/nodes/reduce_sum_default_axes_keepdims.cairo deleted file mode 100644 index 483b37d20..000000000 --- a/tests/nodes/reduce_sum_default_axes_keepdims.cairo +++ 
/dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_default_axes_keepdims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo b/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo deleted file mode 100644 index 2de5818c3..000000000 --- a/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo b/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo deleted file mode 100644 index 6cc93d6f7..000000000 --- a/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { 
- let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(1); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(78); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo deleted file mode 100644 index 2de5818c3..000000000 --- a/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo deleted file mode 100644 index d679605a0..000000000 --- a/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - TensorTrait::new(shape.span(), 
data.span()) -} diff --git a/tests/nodes/reduce_sum_keep_dims.cairo b/tests/nodes/reduce_sum_keep_dims.cairo deleted file mode 100644 index 661d3711f..000000000 --- a/tests/nodes/reduce_sum_keep_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_keep_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_keep_dims/input_0.cairo b/tests/nodes/reduce_sum_keep_dims/input_0.cairo deleted file mode 100644 index 2de5818c3..000000000 --- a/tests/nodes/reduce_sum_keep_dims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_keep_dims/output_0.cairo b/tests/nodes/reduce_sum_keep_dims/output_0.cairo deleted file mode 100644 index 5326997d6..000000000 --- a/tests/nodes/reduce_sum_keep_dims/output_0.cairo +++ /dev/null @@ -1,20 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(4); - data.append(6); - data.append(12); - data.append(14); - data.append(20); - data.append(22); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_no_keep_dims.cairo b/tests/nodes/reduce_sum_no_keep_dims.cairo deleted file mode 100644 index a83405f01..000000000 --- a/tests/nodes/reduce_sum_no_keep_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_no_keep_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo b/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo deleted file mode 100644 index 2de5818c3..000000000 --- a/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo +++ /dev/null @@ -1,26 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - 
data.append(10); - data.append(11); - data.append(12); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo b/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo deleted file mode 100644 index 72c71a185..000000000 --- a/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(4); - data.append(6); - data.append(12); - data.append(14); - data.append(20); - data.append(22); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo deleted file mode 100644 index db089f852..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo deleted file mode 100644 index 11bc960a2..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 327680, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo deleted file mode 100644 index d3b81daf0..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo deleted file mode 100644 index 4a587072e..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_fp16x16_2D_keepdims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, true); - - 
assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo deleted file mode 100644 index 6a8b7cb09..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo deleted file mode 100644 index cadc8f1f9..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp16x16_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo deleted file mode 100644 index 713f0d28a..000000000 --- 
a/tests/nodes/reduce_sum_single_axis_fp8x23_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo deleted file mode 100644 index 88c1db446..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo deleted file mode 100644 index 4b3e55bbe..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = 
ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo deleted file mode 100644 index 88c1db446..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo deleted file mode 100644 index 12e4ce95f..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git 
a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo deleted file mode 100644 index 88c1db446..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo deleted file mode 100644 index 3435050a2..000000000 --- a/tests/nodes/reduce_sum_single_axis_fp8x23_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_1D.cairo b/tests/nodes/reduce_sum_single_axis_i32_1D.cairo deleted file mode 100644 index 7619c0ee4..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - 
-use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i32_1D() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo deleted file mode 100644 index a16657995..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_1D/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo deleted file mode 100644 index d1b50c386..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo deleted file mode 100644 index 
9cd61ef22..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i32_2D_axis_1() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(1, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo deleted file mode 100644 index 2a164a41a..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo deleted file mode 100644 index b9cf7c45d..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); 
- data.append(1); - data.append(5); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo deleted file mode 100644 index cb28aac41..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i32_2D_default() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo deleted file mode 100644 index 2a164a41a..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo deleted file mode 100644 index f1b1716d2..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(4); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo deleted file mode 100644 index 390f3742c..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i32_2D_keepdims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, true); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo deleted file mode 100644 index 2a164a41a..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo 
b/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo deleted file mode 100644 index cbcfa21d7..000000000 --- a/tests/nodes/reduce_sum_single_axis_i32_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(4); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_1D.cairo b/tests/nodes/reduce_sum_single_axis_i8_1D.cairo deleted file mode 100644 index 7a3059a32..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i8_1D() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo deleted file mode 100644 index 3a2fc6193..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = 
ArrayTrait::new(); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo deleted file mode 100644 index 381c69ab2..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i8_2D_axis_1() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(1, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo deleted file mode 100644 index 26f2c2987..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 5, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo deleted file mode 100644 index 09d98cd79..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use 
orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i8_2D_default() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo deleted file mode 100644 index 53f6405e1..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo deleted file mode 100644 index de769f8c0..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = 
ArrayTrait::new(); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 4, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo deleted file mode 100644 index 2af82ece3..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_i8_2D_keepdims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, true); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo deleted file mode 100644 index 53f6405e1..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo 
deleted file mode 100644 index b45c5c277..000000000 --- a/tests/nodes/reduce_sum_single_axis_i8_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 4, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_1D.cairo b/tests/nodes/reduce_sum_single_axis_u32_1D.cairo deleted file mode 100644 index e266d2e04..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_u32_1D() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo deleted file mode 100644 index bd3b7eea9..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = 
ArrayTrait::new(); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo deleted file mode 100644 index fcd06fb15..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_u32_2D_axis_1() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(1, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo deleted file mode 100644 index 530126fb5..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo deleted file mode 100644 index 1bee2ed67..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(5); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo deleted file mode 100644 index 312619beb..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_u32_2D_default() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, false); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo deleted file mode 100644 index 530126fb5..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo 
b/tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo deleted file mode 100644 index 5aeddf000..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(4); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo deleted file mode 100644 index 124af02d5..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_single_axis_u32_2D_keepdims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reduce_sum_single_axis(0, true); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo deleted file mode 100644 index 530126fb5..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - 
shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo deleted file mode 100644 index 5b761b563..000000000 --- a/tests/nodes/reduce_sum_single_axis_u32_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(4); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_extended_dims.cairo b/tests/nodes/reshape_extended_dims.cairo deleted file mode 100644 index 80a7c5cf2..000000000 --- a/tests/nodes/reshape_extended_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_extended_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![2, 3, 2, 2].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_extended_dims/input_0.cairo b/tests/nodes/reshape_extended_dims/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_extended_dims/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_extended_dims/output_0.cairo b/tests/nodes/reshape_extended_dims/output_0.cairo deleted file mode 100644 index de2f5850b..000000000 --- a/tests/nodes/reshape_extended_dims/output_0.cairo +++ /dev/null @@ -1,39 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_negative_dim.cairo b/tests/nodes/reshape_negative_dim.cairo deleted file mode 100644 index 28230dfaa..000000000 --- 
a/tests/nodes/reshape_negative_dim.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_negative_dim() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![2, -1, 2].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_negative_dim/input_0.cairo b/tests/nodes/reshape_negative_dim/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_negative_dim/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_negative_dim/output_0.cairo b/tests/nodes/reshape_negative_dim/output_0.cairo deleted file mode 100644 index ad355bfd6..000000000 --- a/tests/nodes/reshape_negative_dim/output_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(6); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_negative_extended_dims.cairo b/tests/nodes/reshape_negative_extended_dims.cairo deleted file mode 100644 index 58e4e2440..000000000 --- a/tests/nodes/reshape_negative_extended_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_negative_extended_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![-1, 2, 3, 4].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_negative_extended_dims/input_0.cairo b/tests/nodes/reshape_negative_extended_dims/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_negative_extended_dims/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn 
input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_negative_extended_dims/output_0.cairo b/tests/nodes/reshape_negative_extended_dims/output_0.cairo deleted file mode 100644 index 66d21516d..000000000 --- a/tests/nodes/reshape_negative_extended_dims/output_0.cairo +++ /dev/null @@ -1,39 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_one_dim.cairo b/tests/nodes/reshape_one_dim.cairo deleted file mode 100644 index d1d7ec8ea..000000000 --- a/tests/nodes/reshape_one_dim.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod 
output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_one_dim() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![24].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_one_dim/input_0.cairo b/tests/nodes/reshape_one_dim/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_one_dim/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_one_dim/output_0.cairo b/tests/nodes/reshape_one_dim/output_0.cairo deleted file mode 100644 index a6ad8efcb..000000000 --- a/tests/nodes/reshape_one_dim/output_0.cairo +++ /dev/null @@ -1,36 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - 
let mut shape = ArrayTrait::::new(); - shape.append(24); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_reduced_dims.cairo b/tests/nodes/reshape_reduced_dims.cairo deleted file mode 100644 index 4025a95a3..000000000 --- a/tests/nodes/reshape_reduced_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_reduced_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![2,12].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_reduced_dims/input_0.cairo b/tests/nodes/reshape_reduced_dims/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_reduced_dims/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - 
data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_reduced_dims/output_0.cairo b/tests/nodes/reshape_reduced_dims/output_0.cairo deleted file mode 100644 index 3ab9777df..000000000 --- a/tests/nodes/reshape_reduced_dims/output_0.cairo +++ /dev/null @@ -1,37 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(12); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_reordered_all_dims.cairo b/tests/nodes/reshape_reordered_all_dims.cairo deleted file mode 100644 index a31b6e23c..000000000 --- a/tests/nodes/reshape_reordered_all_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] 
-#[available_gas(2000000000)] -fn test_reshape_reordered_all_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![4,2,3].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_reordered_all_dims/input_0.cairo b/tests/nodes/reshape_reordered_all_dims/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_reordered_all_dims/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_reordered_all_dims/output_0.cairo b/tests/nodes/reshape_reordered_all_dims/output_0.cairo deleted file mode 100644 index 2308361dc..000000000 --- a/tests/nodes/reshape_reordered_all_dims/output_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - 
data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_reordered_last_dims.cairo b/tests/nodes/reshape_reordered_last_dims.cairo deleted file mode 100644 index 4a1e47e5f..000000000 --- a/tests/nodes/reshape_reordered_last_dims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_reordered_last_dims() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![2,4,3].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_reordered_last_dims/input_0.cairo b/tests/nodes/reshape_reordered_last_dims/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_reordered_last_dims/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - 
data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_reordered_last_dims/output_0.cairo b/tests/nodes/reshape_reordered_last_dims/output_0.cairo deleted file mode 100644 index bb307aeb5..000000000 --- a/tests/nodes/reshape_reordered_last_dims/output_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_zero_and_negative_dim.cairo b/tests/nodes/reshape_zero_and_negative_dim.cairo deleted file mode 100644 index c3ab116c7..000000000 --- a/tests/nodes/reshape_zero_and_negative_dim.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn 
test_reshape_zero_and_negative_dim() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![2, 0, 1, -1].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo b/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo b/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo deleted file mode 100644 index b54f6fc0c..000000000 --- a/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo +++ /dev/null @@ -1,39 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(1); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); 
- data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_zero_dim.cairo b/tests/nodes/reshape_zero_dim.cairo deleted file mode 100644 index 4cc3e8870..000000000 --- a/tests/nodes/reshape_zero_dim.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_reshape_zero_dim() { - let input_0 = input_0::input_0(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.reshape(array![2, 0, 4, 1].span()); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/reshape_zero_dim/input_0.cairo b/tests/nodes/reshape_zero_dim/input_0.cairo deleted file mode 100644 index 1bf8d2578..000000000 --- a/tests/nodes/reshape_zero_dim/input_0.cairo +++ /dev/null @@ -1,38 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - 
data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reshape_zero_dim/output_0.cairo b/tests/nodes/reshape_zero_dim/output_0.cairo deleted file mode 100644 index 4c6823a34..000000000 --- a/tests/nodes/reshape_zero_dim/output_0.cairo +++ /dev/null @@ -1,39 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(3); - shape.append(4); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/xor_fp16x16.cairo b/tests/nodes/xor_fp16x16.cairo index 6f5c67bda..8a54526f7 100644 --- a/tests/nodes/xor_fp16x16.cairo +++ b/tests/nodes/xor_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_xor_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_fp16x16/input_0.cairo b/tests/nodes/xor_fp16x16/input_0.cairo index a895df696..147abd897 100644 --- a/tests/nodes/xor_fp16x16/input_0.cairo +++ b/tests/nodes/xor_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16/input_1.cairo b/tests/nodes/xor_fp16x16/input_1.cairo index 1d2d646c2..b87dfda30 100644 --- a/tests/nodes/xor_fp16x16/input_1.cairo +++ b/tests/nodes/xor_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use 
orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16/output_0.cairo b/tests/nodes/xor_fp16x16/output_0.cairo index 0e89fbf7a..36a93ca20 100644 --- a/tests/nodes/xor_fp16x16/output_0.cairo +++ b/tests/nodes/xor_fp16x16/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(0); + data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16_broadcast.cairo b/tests/nodes/xor_fp16x16_broadcast.cairo index 3e3660dee..a8d3f37e0 100644 --- a/tests/nodes/xor_fp16x16_broadcast.cairo +++ b/tests/nodes/xor_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, 
assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_xor_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_fp16x16_broadcast/input_0.cairo b/tests/nodes/xor_fp16x16_broadcast/input_0.cairo index e3279c555..f96f54173 100644 --- a/tests/nodes/xor_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/xor_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16_broadcast/input_1.cairo b/tests/nodes/xor_fp16x16_broadcast/input_1.cairo index 59a06f83b..edf1408cd 100644 --- a/tests/nodes/xor_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/xor_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ 
fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp16x16_broadcast/output_0.cairo b/tests/nodes/xor_fp16x16_broadcast/output_0.cairo index 897d076d9..069598d33 100644 --- a/tests/nodes/xor_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/xor_fp16x16_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(1); data.append(0); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23.cairo b/tests/nodes/xor_fp8x23.cairo index 577e1d365..7d89727de 100644 --- a/tests/nodes/xor_fp8x23.cairo +++ b/tests/nodes/xor_fp8x23.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_fp8x23() { let input_0 = 
input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_fp8x23/input_0.cairo b/tests/nodes/xor_fp8x23/input_0.cairo index aa4155752..099b5378c 100644 --- a/tests/nodes/xor_fp8x23/input_0.cairo +++ b/tests/nodes/xor_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); 
data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23/input_1.cairo b/tests/nodes/xor_fp8x23/input_1.cairo index d67ef0695..b5883e501 100644 --- a/tests/nodes/xor_fp8x23/input_1.cairo +++ b/tests/nodes/xor_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); 
data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23/output_0.cairo b/tests/nodes/xor_fp8x23/output_0.cairo index 451f25ccc..3a99f04f6 100644 --- a/tests/nodes/xor_fp8x23/output_0.cairo +++ b/tests/nodes/xor_fp8x23/output_0.cairo @@ -1,9 
+1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -16,8 +15,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(0); - data.append(0); data.append(1); data.append(0); data.append(1); @@ -25,17 +22,19 @@ fn output_0() -> Tensor { data.append(0); data.append(1); data.append(0); + data.append(1); + data.append(0); data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); - data.append(0); data.append(1); data.append(1); data.append(0); data.append(0); data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23_broadcast.cairo b/tests/nodes/xor_fp8x23_broadcast.cairo index aeb36be85..30a57eb23 100644 --- a/tests/nodes/xor_fp8x23_broadcast.cairo +++ b/tests/nodes/xor_fp8x23_broadcast.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = 
input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_fp8x23_broadcast/input_0.cairo b/tests/nodes/xor_fp8x23_broadcast/input_0.cairo index 11337c926..68f8cc262 100644 --- a/tests/nodes/xor_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/xor_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_fp8x23_broadcast/input_1.cairo b/tests/nodes/xor_fp8x23_broadcast/input_1.cairo index 8f5154796..edba895b1 100644 --- a/tests/nodes/xor_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/xor_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), 
data.span()) } diff --git a/tests/nodes/xor_fp8x23_broadcast/output_0.cairo b/tests/nodes/xor_fp8x23_broadcast/output_0.cairo index 0e89fbf7a..83a3b1e6a 100644 --- a/tests/nodes/xor_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/xor_fp8x23_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(1); data.append(0); - data.append(0); - data.append(0); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32.cairo b/tests/nodes/xor_i32.cairo index b6900fc73..d1b6a7705 100644 --- a/tests/nodes/xor_i32.cairo +++ b/tests/nodes/xor_i32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; #[test] #[available_gas(2000000000)] fn test_xor_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_i32/input_0.cairo b/tests/nodes/xor_i32/input_0.cairo index 6b8c58eda..6bffe1752 100644 --- 
a/tests/nodes/xor_i32/input_0.cairo +++ b/tests/nodes/xor_i32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(-1); - data.append(-2); - data.append(-1); data.append(2); - data.append(-3); - data.append(-2); - data.append(-1); - data.append(-3); - data.append(-1); data.append(1); data.append(1); - data.append(-1); data.append(0); - data.append(-1); + data.append(2); + data.append(2); data.append(2); data.append(2); data.append(-1); data.append(2); - data.append(-2); - data.append(0); data.append(-1); - data.append(-2); + data.append(0); + data.append(2); + data.append(1); + data.append(1); + data.append(-3); + data.append(-3); + data.append(-3); data.append(-1); + data.append(-3); data.append(-1); + data.append(-2); + data.append(2); + data.append(1); data.append(0); data.append(0); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32/input_1.cairo b/tests/nodes/xor_i32/input_1.cairo index 77387de26..b857282fd 100644 --- a/tests/nodes/xor_i32/input_1.cairo +++ b/tests/nodes/xor_i32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); + data.append(-1); + data.append(-3); data.append(-2); data.append(0); + data.append(2); + 
data.append(2); + data.append(2); data.append(-3); + data.append(0); + data.append(0); + data.append(-1); data.append(1); data.append(-2); data.append(1); - data.append(1); data.append(-2); - data.append(-1); data.append(0); - data.append(2); + data.append(0); + data.append(-3); + data.append(1); + data.append(1); data.append(1); data.append(-2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); data.append(1); + data.append(-2); data.append(1); - data.append(0); - data.append(2); data.append(-3); - data.append(2); - data.append(2); - data.append(2); - data.append(0); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32/output_0.cairo b/tests/nodes/xor_i32/output_0.cairo index 828a00d81..323d9bf01 100644 --- a/tests/nodes/xor_i32/output_0.cairo +++ b/tests/nodes/xor_i32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -12,8 +11,6 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); data.append(0); - data.append(1); - data.append(0); data.append(0); data.append(0); data.append(0); @@ -21,11 +18,12 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(0); + data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); data.append(1); data.append(1); data.append(0); @@ -36,6 +34,7 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); + data.append(1); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32_broadcast.cairo b/tests/nodes/xor_i32_broadcast.cairo index b4b9d9d87..6a118bfb4 100644 --- 
a/tests/nodes/xor_i32_broadcast.cairo +++ b/tests/nodes/xor_i32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; #[test] #[available_gas(2000000000)] fn test_xor_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_i32_broadcast/input_0.cairo b/tests/nodes/xor_i32_broadcast/input_0.cairo index fe474ce81..af954e380 100644 --- a/tests/nodes/xor_i32_broadcast/input_0.cairo +++ b/tests/nodes/xor_i32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(1); - data.append(2); + data.append(-2); data.append(0); + data.append(-3); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32_broadcast/input_1.cairo b/tests/nodes/xor_i32_broadcast/input_1.cairo index cdf0184f0..e49c73fb5 100644 --- a/tests/nodes/xor_i32_broadcast/input_1.cairo +++ 
b/tests/nodes/xor_i32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I32Tensor, I32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(-1); + data.append(2); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i32_broadcast/output_0.cairo b/tests/nodes/xor_i32_broadcast/output_0.cairo index 085034f13..94c384136 100644 --- a/tests/nodes/xor_i32_broadcast/output_0.cairo +++ b/tests/nodes/xor_i32_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); data.append(0); data.append(1); - data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8.cairo b/tests/nodes/xor_i8.cairo index 8437bbf13..0ba2c0257 100644 --- a/tests/nodes/xor_i8.cairo +++ b/tests/nodes/xor_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::I8TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_i8/input_0.cairo b/tests/nodes/xor_i8/input_0.cairo index 8eb98bdfe..13e2952b5 100644 --- a/tests/nodes/xor_i8/input_0.cairo +++ b/tests/nodes/xor_i8/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(-3); data.append(2); - data.append(1); + data.append(-3); data.append(-1); - data.append(0); + data.append(2); + data.append(1); + data.append(-3); data.append(2); data.append(-3); + data.append(-1); + data.append(-2); + data.append(-1); data.append(0); data.append(-1); data.append(1); data.append(-3); + data.append(-1); data.append(-2); data.append(-3); - data.append(-3); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(0); data.append(-2); + data.append(-1); + data.append(-3); data.append(-2); data.append(0); - data.append(-2); - data.append(-3); data.append(2); + data.append(-3); data.append(1); - data.append(0); - data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8/input_1.cairo b/tests/nodes/xor_i8/input_1.cairo index 
ade07f6d4..700afe506 100644 --- a/tests/nodes/xor_i8/input_1.cairo +++ b/tests/nodes/xor_i8/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -12,30 +11,30 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); data.append(-1); - data.append(-2); - data.append(-3); - data.append(-2); - data.append(2); - data.append(-3); data.append(1); - data.append(-1); data.append(1); - data.append(-1); + data.append(2); + data.append(2); data.append(1); + data.append(-3); data.append(1); - data.append(0); - data.append(0); + data.append(-3); + data.append(2); data.append(-2); data.append(-3); - data.append(1); - data.append(1); - data.append(-1); - data.append(0); - data.append(-1); + data.append(-2); + data.append(2); + data.append(-2); + data.append(2); + data.append(2); data.append(0); data.append(-2); data.append(2); data.append(-3); - data.append(-1); + data.append(-2); + data.append(-2); + data.append(1); + data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8/output_0.cairo b/tests/nodes/xor_i8/output_0.cairo index f4cc18d31..8affaf526 100644 --- a/tests/nodes/xor_i8/output_0.cairo +++ b/tests/nodes/xor_i8/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -13,29 +12,29 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(1); 
data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(1); - data.append(1); - data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); + data.append(0); data.append(1); data.append(0); data.append(0); + data.append(0); + data.append(0); data.append(1); data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8_broadcast.cairo b/tests/nodes/xor_i8_broadcast.cairo index 1d7ed2330..98e31be33 100644 --- a/tests/nodes/xor_i8_broadcast.cairo +++ b/tests/nodes/xor_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::I8TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_i8_broadcast/input_0.cairo b/tests/nodes/xor_i8_broadcast/input_0.cairo index 7aadf0945..402d98a0a 100644 --- a/tests/nodes/xor_i8_broadcast/input_0.cairo +++ b/tests/nodes/xor_i8_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-2); - data.append(-3); + data.append(0); + data.append(2); data.append(1); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8_broadcast/input_1.cairo b/tests/nodes/xor_i8_broadcast/input_1.cairo index 9c02b355b..c5b62636c 100644 --- a/tests/nodes/xor_i8_broadcast/input_1.cairo +++ b/tests/nodes/xor_i8_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{I8Tensor, I8TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(0); + data.append(-2); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_i8_broadcast/output_0.cairo b/tests/nodes/xor_i8_broadcast/output_0.cairo index 5614176ce..01569b35a 100644 --- a/tests/nodes/xor_i8_broadcast/output_0.cairo +++ b/tests/nodes/xor_i8_broadcast/output_0.cairo @@ -1,17 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(0); - 
data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32.cairo b/tests/nodes/xor_u32.cairo index 0b02997b7..739d60af4 100644 --- a/tests/nodes/xor_u32.cairo +++ b/tests/nodes/xor_u32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_u32/input_0.cairo b/tests/nodes/xor_u32/input_0.cairo index 92bab79ef..ce0703451 100644 --- a/tests/nodes/xor_u32/input_0.cairo +++ b/tests/nodes/xor_u32/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,31 +10,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(4); + data.append(1); + data.append(1); data.append(5); + data.append(3); data.append(1); + data.append(2); + data.append(2); data.append(0); data.append(0); - data.append(3); - data.append(0); - data.append(2); - data.append(3); - 
data.append(3); - data.append(3); data.append(5); data.append(2); + data.append(1); + data.append(2); + data.append(1); + data.append(1); + data.append(5); data.append(2); data.append(0); - data.append(4); - data.append(3); data.append(0); - data.append(4); + data.append(0); + data.append(2); + data.append(1); data.append(1); data.append(4); - data.append(3); - data.append(2); - data.append(2); - data.append(3); - data.append(3); + data.append(1); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32/input_1.cairo b/tests/nodes/xor_u32/input_1.cairo index 7d0abdeab..1e770e8dd 100644 --- a/tests/nodes/xor_u32/input_1.cairo +++ b/tests/nodes/xor_u32/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,32 +9,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(5); - data.append(3); data.append(4); - data.append(1); - data.append(0); - data.append(3); - data.append(1); data.append(4); - data.append(1); - data.append(0); + data.append(2); data.append(5); + data.append(0); data.append(5); data.append(2); data.append(3); + data.append(0); + data.append(5); + data.append(0); + data.append(2); + data.append(0); data.append(2); data.append(1); + data.append(5); + data.append(1); + data.append(5); data.append(4); + data.append(4); + data.append(0); data.append(3); - data.append(3); - data.append(2); - data.append(2); data.append(1); - data.append(3); - data.append(5); + data.append(1); data.append(0); - data.append(2); + data.append(1); data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32/output_0.cairo b/tests/nodes/xor_u32/output_0.cairo index 
94e773bce..e32c287bf 100644 --- a/tests/nodes/xor_u32/output_0.cairo +++ b/tests/nodes/xor_u32/output_0.cairo @@ -1,9 +1,8 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -14,9 +13,12 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(0); data.append(1); data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); data.append(1); data.append(0); data.append(1); @@ -26,14 +28,11 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(1); - data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); data.append(0); data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); diff --git a/tests/nodes/xor_u32_broadcast.cairo b/tests/nodes/xor_u32_broadcast.cairo index 0fa6a593c..9c5abf64f 100644 --- a/tests/nodes/xor_u32_broadcast.cairo +++ b/tests/nodes/xor_u32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::I32TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; #[test] #[available_gas(2000000000)] fn test_xor_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); + let z = 
output_0::output_0(); - let y_0 = input_0.xor(@input_1); + let y = input_0.xor(@input_1); - assert_eq(y_0, z_0); + assert_eq(y, z); } diff --git a/tests/nodes/xor_u32_broadcast/input_0.cairo b/tests/nodes/xor_u32_broadcast/input_0.cairo index 58317b7f3..48012ef07 100644 --- a/tests/nodes/xor_u32_broadcast/input_0.cairo +++ b/tests/nodes/xor_u32_broadcast/input_0.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,9 +8,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(4); - data.append(4); data.append(2); - data.append(4); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32_broadcast/input_1.cairo b/tests/nodes/xor_u32_broadcast/input_1.cairo index 57bf57831..d6a15bdd7 100644 --- a/tests/nodes/xor_u32_broadcast/input_1.cairo +++ b/tests/nodes/xor_u32_broadcast/input_1.cairo @@ -1,7 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +8,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(4); - data.append(4); + data.append(3); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/xor_u32_broadcast/output_0.cairo b/tests/nodes/xor_u32_broadcast/output_0.cairo index 0e89fbf7a..069598d33 100644 --- a/tests/nodes/xor_u32_broadcast/output_0.cairo +++ b/tests/nodes/xor_u32_broadcast/output_0.cairo @@ -1,9 +1,8 @@ use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; +use orion::operators::tensor::{U32Tensor, U32TensorSub}; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); @@ -12,6 +11,6 @@ fn output_0() -> Tensor { data.append(0); data.append(0); data.append(0); - data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } From 1507b962876142f9316711a9584d2cdfb201a592 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 16:44:36 +0100 Subject: [PATCH 32/68] refactor gather --- src/operators/tensor/core.cairo | 2 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/gather.cairo | 42 +++++++++++++++---- tests/lib.cairo | 12 +++--- 14 files changed, 52 insertions(+), 26 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 0d21a4de3..a5919cb85 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -3211,7 +3211,7 @@ trait TensorTrait { /// [1. 2. 
3.]] /// ``` /// - fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; /// # tensor.unsqueeze /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 612a397cc..82de59236 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -226,7 +226,7 @@ impl BoolTensor of TensorTrait { core_ops::slice::(self, starts, ends, axes, steps) } - fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index c9c31ae23..43f0b8d3f 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -301,7 +301,7 @@ impl Complex64Tensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index a37ed0442..b2a77e2f0 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -349,7 +349,7 @@ impl FP16x16Tensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 2003b28ff..2beef19c8 100644 --- 
a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -313,7 +313,7 @@ impl FP16x16WTensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 4870226a1..52dfb8147 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -346,7 +346,7 @@ impl FP32x32Tensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 3a7214d18..700ef33d4 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -346,7 +346,7 @@ impl FP64x64Tensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index b4a26d749..3e6f3ccfa 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -346,7 +346,7 @@ impl FP8x23Tensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo 
b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 06a297b69..1181ad3a1 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -300,7 +300,7 @@ impl FP8x23WTensor of TensorTrait { } fn gather( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 296876516..c5c88a5d8 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -340,7 +340,7 @@ impl I32Tensor of TensorTrait { core_tensor::slice::(self, starts, ends, axes, steps) } - fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 42d807c68..78126051b 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -343,7 +343,7 @@ impl I8Tensor of TensorTrait { core_tensor::slice::(self, starts, ends, axes, steps) } - fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index efb681a86..1df2de437 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -287,7 +287,7 @@ impl U32Tensor of TensorTrait { core_tensor::slice::(self, starts, ends, axes, steps) } - fn gather(self: @Tensor, 
indices: Tensor, axis: Option) -> Tensor { + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { math::gather::gather(self, indices, axis) } diff --git a/src/operators/tensor/math/gather.cairo b/src/operators/tensor/math/gather.cairo index a60e927ab..c14978969 100644 --- a/src/operators/tensor/math/gather.cairo +++ b/src/operators/tensor/math/gather.cairo @@ -1,3 +1,5 @@ +use core::option::OptionTrait; +use core::traits::TryInto; use alexandria_data_structures::array_ext::SpanTraitExt; use orion::numbers::NumberTrait; @@ -5,17 +7,41 @@ use orion::operators::tensor::{TensorTrait, Tensor}; /// Cf: TensorTrait::gather docstring fn gather, impl TCopy: Copy, impl TDrop: Drop,>( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { - let axis = match axis { - Option::Some(val) => val, + let axis: usize = match axis { + Option::Some(val) => { + if val < 0 { + (((*self.shape).len()).try_into().unwrap() + val).try_into().unwrap() + } else { + val.try_into().unwrap() + } + }, Option::None => 0 }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); let axis_shape = *(*self.shape).at(axis); - let ind_max = indices.data.max().unwrap(); - assert(ind_max < axis_shape, 'this index out of bounds'); + + // Adjust indices that are negative + let mut adjusted_indices = array![]; + let mut indices_data = indices.data.clone(); + loop { + match indices_data.pop_front() { + Option::Some(index) => { + let adjusted_index: usize = if *index < 0 { + let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap(); + val + } else { + let val: u32 = (*index).try_into().unwrap(); + val + }; + assert(adjusted_index >= 0 && adjusted_index < axis_shape, 'Index out of bounds'); + adjusted_indices.append(adjusted_index); + }, + Option::None => { break; } + }; + }; let mut output_data = array![]; let mut output_size = array![]; @@ -80,14 +106,14 @@ fn gather, impl TCopy: Copy, impl TDrop: let mut outer_loop: 
usize = 0; let axis_index = *self.shape[axis]; while outer_loop != outer_loop_break { - let mut data_indices = indices.data; + let mut adjusted_indices_iter = adjusted_indices.clone(); loop { - match data_indices.pop_front() { + match adjusted_indices_iter.pop_front() { Option::Some(indice) => { let mut inner_loop = 0; while inner_loop != break_loop { let new_val = inner_loop / divisor % axis_index; - if *indice == new_val { + if indice == new_val { output_data.append(*self.data[break_loop * outer_loop + inner_loop]); } diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..eb58139db 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -mod numbers; -mod performance; -mod tensor_core; -mod nodes; -mod ml; -mod operators; +// mod numbers; +// mod performance; +// mod tensor_core; +// mod nodes; +// mod ml; +// mod operators; From 49b08d10bb27c2e8f45273724c8c64a2cdcf48d4 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 16:47:57 +0100 Subject: [PATCH 33/68] fix test file manager --- nodegen/file_manager.py | 35 +- tests/lib.cairo | 2 +- tests/nodes.cairo | 2098 +++++++++++++++++++-------------------- 3 files changed, 1073 insertions(+), 1062 deletions(-) diff --git a/nodegen/file_manager.py b/nodegen/file_manager.py index 203b6b333..babe26399 100644 --- a/nodegen/file_manager.py +++ b/nodegen/file_manager.py @@ -91,25 +91,36 @@ def base_template( This method generates a list of strings that form the template of a Cairo test function, including module imports, function definition, and assertions. 
""" - return [ + template = [ *[f"mod input_{i};" for i in range(arg_cnt)], *[f"mod output_{i};" for i in range(out_cnt)], - *[""], - *[""], + "", + "", *[f"use {ref};" for ref in refs], - *[""], - *["#[test]"], - *["#[available_gas(2000000000)]"], - *[f"fn test_{name}()" + " {"], + "", + "#[test]", + "#[available_gas(2000000000)]", + f"fn test_{name}()" + " {", *[f" let input_{i} = input_{i}::input_{i}();" for i in range(arg_cnt)], *[f" let z_{i} = output_{i}::output_{i}();" for i in range(out_cnt)], - *[""], - *[f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};"], - *[""], - *[f" assert_eq(y_{i}, z_{i});" for i in range(out_cnt)], - *["}"], + "" ] + # Handling conditional function signature based on the number of outputs + if out_cnt > 1: + template.append(f" let ({', '.join(f'y_{i}' for i in range(out_cnt))}) = {func_sig};") + else: + template.append(f" let y_0 = {func_sig};") + + # Continue appending to the template + template.extend([ + "", + *[f" assert_eq(y_{i}, z_{i});" for i in range(out_cnt)], + "}" + ]) + + return template + @classmethod def sequence_template(cls, name: str, arg_cnt: int, refs: list[str], func_sig: str) -> list[str]: """ diff --git a/tests/lib.cairo b/tests/lib.cairo index eb58139db..a61287d92 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ // mod numbers; // mod performance; // mod tensor_core; -// mod nodes; +mod nodes; // mod ml; // mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 29bebb762..09ab341e9 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,1049 +1,1049 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; -mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmax_fp16x16_1D_default; -mod 
argmax_fp16x16_1D_keepdims_false; -mod argmax_fp16x16_1D_last_index; -mod argmax_fp16x16_2D_default; -mod argmax_fp16x16_2D_keepdims_false; -mod argmax_fp16x16_2D_last_index; -mod argmax_fp16x16_3D_default; -mod argmax_fp16x16_3D_keepdims_false; -mod argmax_fp16x16_3D_last_index; -mod argmax_fp8x23_1D_default; -mod argmax_fp8x23_1D_keepdims_false; -mod argmax_fp8x23_1D_last_index; -mod argmax_fp8x23_2D_default; -mod argmax_fp8x23_2D_keepdims_false; -mod argmax_fp8x23_2D_last_index; -mod argmax_fp8x23_3D_default; -mod argmax_fp8x23_3D_keepdims_false; -mod argmax_fp8x23_3D_last_index; -mod argmax_i32_1D_default; -mod argmax_i32_1D_keepdims_false; -mod argmax_i32_1D_last_index; -mod argmax_i32_2D_default; -mod argmax_i32_2D_keepdims_false; -mod argmax_i32_2D_last_index; -mod argmax_i32_3D_default; -mod argmax_i32_3D_keepdims_false; -mod argmax_i32_3D_last_index; -mod argmax_i8_1D_default; -mod argmax_i8_1D_keepdims_false; -mod argmax_i8_1D_last_index; -mod argmax_i8_2D_default; -mod argmax_i8_2D_keepdims_false; -mod argmax_i8_2D_last_index; -mod argmax_i8_3D_default; -mod argmax_i8_3D_keepdims_false; -mod argmax_i8_3D_last_index; -mod argmax_u32_1D_default; -mod argmax_u32_1D_keepdims_false; -mod argmax_u32_1D_last_index; -mod argmax_u32_2D_default; -mod argmax_u32_2D_keepdims_false; -mod argmax_u32_2D_last_index; -mod argmax_u32_3D_default; -mod argmax_u32_3D_keepdims_false; -mod argmax_u32_3D_last_index; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod 
argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod 
concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod 
greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod reduce_sum_fp16x16_1D; -mod reduce_sum_fp16x16_2D_default; -mod reduce_sum_fp16x16_2D_keepdims; -mod reduce_sum_fp16x16_2D_axis_1; -mod reduce_sum_fp8x23_1D; -mod reduce_sum_fp8x23_2D_default; -mod reduce_sum_fp8x23_2D_keepdims; -mod reduce_sum_fp8x23_2D_axis_1; -mod reduce_sum_i32_1D; -mod reduce_sum_i32_2D_default; -mod reduce_sum_i32_2D_keepdims; -mod reduce_sum_i32_2D_axis_1; -mod reduce_sum_i8_1D; -mod reduce_sum_i8_2D_default; -mod reduce_sum_i8_2D_keepdims; -mod reduce_sum_i8_2D_axis_1; -mod reduce_sum_u32_1D; -mod reduce_sum_u32_2D_default; -mod reduce_sum_u32_2D_keepdims; -mod reduce_sum_u32_2D_axis_1; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; 
-mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softmax_fp16x16; -mod softmax_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod less_fp16x16; -mod less_fp16x16_broadcast; -mod less_fp8x23; -mod less_fp8x23_broadcast; -mod less_i32; -mod less_i32_broadcast; -mod less_i8; -mod less_i8_broadcast; -mod less_u32; -mod less_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod gather_fp8x23_3d_default; -mod gather_fp8x23_3d_axis1; -mod gather_fp8x23_3d_axis2; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; -mod gather_i8_3d_default; -mod gather_i8_3d_axis1; -mod gather_i8_3d_axis2; -mod gather_i32_3d_default; -mod gather_i32_3d_axis1; -mod gather_i32_3d_axis2; -mod gather_u32_3d_default; -mod 
gather_u32_3d_axis1; -mod gather_u32_3d_axis2; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; -mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod 
min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod 
triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod 
reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; 
-mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod gather_elements_fp16x16_3d_default; -mod gather_elements_fp16x16_3d_axis1; -mod gather_elements_fp16x16_3d_axis2; -mod gather_elements_fp8x23_3d_default; -mod gather_elements_fp8x23_3d_axis1; -mod gather_elements_fp8x23_3d_axis2; -mod gather_elements_i8_3d_default; -mod gather_elements_i8_3d_axis1; -mod gather_elements_i32_3d_default; -mod gather_elements_i32_3d_axis1; -mod gather_elements_i32_3d_axis2; -mod gather_elements_u32_default; -mod gather_elements_u32_axis1; -mod gather_elements_u32_axis2; -mod gather_elements_u32_axis3; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod 
sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod 
concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod 
resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod 
compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -mod reduce_log_sum_exp_fp32x32_export_keepdims; -mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; -mod col2im_strides; -mod random_uniform_like_fp16x16; -mod random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod 
hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; 
-mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; -mod label_encoder_fp16x16_3d_default; -mod label_encoder_fp8x23_default; -mod label_encoder_i8_default; -mod label_encoder_i32_default; -mod label_encoder_u32_default; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// mod add_u32; +// mod add_u32_broadcast; +// mod argmax_fp16x16_1D_default; +// mod argmax_fp16x16_1D_keepdims_false; +// mod argmax_fp16x16_1D_last_index; +// mod argmax_fp16x16_2D_default; +// mod argmax_fp16x16_2D_keepdims_false; +// mod argmax_fp16x16_2D_last_index; +// mod argmax_fp16x16_3D_default; +// mod argmax_fp16x16_3D_keepdims_false; +// mod argmax_fp16x16_3D_last_index; +// mod argmax_fp8x23_1D_default; +// mod argmax_fp8x23_1D_keepdims_false; +// mod argmax_fp8x23_1D_last_index; +// mod argmax_fp8x23_2D_default; +// mod argmax_fp8x23_2D_keepdims_false; +// mod argmax_fp8x23_2D_last_index; +// mod 
argmax_fp8x23_3D_default; +// mod argmax_fp8x23_3D_keepdims_false; +// mod argmax_fp8x23_3D_last_index; +// mod argmax_i32_1D_default; +// mod argmax_i32_1D_keepdims_false; +// mod argmax_i32_1D_last_index; +// mod argmax_i32_2D_default; +// mod argmax_i32_2D_keepdims_false; +// mod argmax_i32_2D_last_index; +// mod argmax_i32_3D_default; +// mod argmax_i32_3D_keepdims_false; +// mod argmax_i32_3D_last_index; +// mod argmax_i8_1D_default; +// mod argmax_i8_1D_keepdims_false; +// mod argmax_i8_1D_last_index; +// mod argmax_i8_2D_default; +// mod argmax_i8_2D_keepdims_false; +// mod argmax_i8_2D_last_index; +// mod argmax_i8_3D_default; +// mod argmax_i8_3D_keepdims_false; +// mod argmax_i8_3D_last_index; +// mod argmax_u32_1D_default; +// mod argmax_u32_1D_keepdims_false; +// mod argmax_u32_1D_last_index; +// mod argmax_u32_2D_default; +// mod argmax_u32_2D_keepdims_false; +// mod argmax_u32_2D_last_index; +// mod argmax_u32_3D_default; +// mod argmax_u32_3D_keepdims_false; +// mod argmax_u32_3D_last_index; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod 
argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; +// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; 
+// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; +// mod less_equal_i32_broadcast; +// mod less_equal_i8; +// mod less_equal_i8_broadcast; +// mod less_equal_u32; +// mod less_equal_u32_broadcast; +// mod greater_fp16x16; +// mod greater_fp16x16_broadcast; +// mod greater_fp8x23; +// mod 
greater_fp8x23_broadcast; +// mod greater_i32; +// mod greater_i32_broadcast; +// mod greater_i8; +// mod greater_i8_broadcast; +// mod greater_u32; +// mod greater_u32_broadcast; +// mod leaky_relu_fp16x16; +// mod leaky_relu_fp8x23; +// mod linear_fp16x16; +// mod linear_fp8x23; +// mod linear_i32; +// mod linear_i8; +// mod linear_u32; +// mod log_fp16x16; +// mod log_fp8x23; +// mod logsoftmax_fp16x16_axis_0; +// mod logsoftmax_fp16x16_axis_1; +// mod logsoftmax_fp8x23_axis_0; +// mod logsoftmax_fp8x23_axis_1; +// mod matmul_fp16x16_1d; +// mod matmul_fp16x16_2x2; +// mod matmul_fp16x16_2x1; +// mod matmul_fp16x16_1x2; +// mod matmul_fp8x23_1d; +// mod matmul_fp8x23_2x2; +// mod matmul_fp8x23_2x1; +// mod matmul_fp8x23_1x2; +// mod matmul_i32_1d; +// mod matmul_i32_2x2; +// mod matmul_i32_2x1; +// mod matmul_i32_1x2; +// mod matmul_i8_1d; +// mod matmul_i8_2x2; +// mod matmul_i8_2x1; +// mod matmul_i8_1x2; +// mod matmul_u32_1d; +// mod matmul_u32_2x2; +// mod matmul_u32_2x1; +// mod matmul_u32_1x2; +// mod mul_fp16x16; +// mod mul_fp16x16_broadcast; +// mod mul_fp8x23; +// mod mul_fp8x23_broadcast; +// mod mul_i32; +// mod mul_i32_broadcast; +// mod mul_i8; +// mod mul_i8_broadcast; +// mod mul_u32; +// mod mul_u32_broadcast; +// mod or_fp16x16; +// mod or_fp16x16_broadcast; +// mod or_fp8x23; +// mod or_fp8x23_broadcast; +// mod or_i32; +// mod or_i32_broadcast; +// mod or_i8; +// mod or_i8_broadcast; +// mod or_u32; +// mod or_u32_broadcast; +// mod reduce_sum_fp16x16_1D; +// mod reduce_sum_fp16x16_2D_default; +// mod reduce_sum_fp16x16_2D_keepdims; +// mod reduce_sum_fp16x16_2D_axis_1; +// mod reduce_sum_fp8x23_1D; +// mod reduce_sum_fp8x23_2D_default; +// mod reduce_sum_fp8x23_2D_keepdims; +// mod reduce_sum_fp8x23_2D_axis_1; +// mod reduce_sum_i32_1D; +// mod reduce_sum_i32_2D_default; +// mod reduce_sum_i32_2D_keepdims; +// mod reduce_sum_i32_2D_axis_1; +// mod reduce_sum_i8_1D; +// mod reduce_sum_i8_2D_default; +// mod reduce_sum_i8_2D_keepdims; +// mod 
reduce_sum_i8_2D_axis_1; +// mod reduce_sum_u32_1D; +// mod reduce_sum_u32_2D_default; +// mod reduce_sum_u32_2D_keepdims; +// mod reduce_sum_u32_2D_axis_1; +// mod relu_fp16x16; +// mod relu_fp8x23; +// mod relu_i32; +// mod relu_i8; +// mod sigmoid_fp16x16; +// mod sigmoid_fp8x23; +// mod sin_fp16x16; +// mod sin_fp8x23; +// mod sinh_fp16x16; +// mod sinh_fp8x23; +// mod softmax_fp16x16; +// mod softmax_fp8x23; +// mod softplus_fp8x23; +// mod softplus_fp16x16; +// mod softsign_fp8x23; +// mod softsign_fp16x16; +// mod sqrt_fp16x16; +// mod sqrt_fp8x23; +// mod sub_fp16x16; +// mod sub_fp16x16_broadcast; +// mod sub_fp8x23; +// mod sub_fp8x23_broadcast; +// mod sub_i32; +// mod sub_i32_broadcast; +// mod sub_i8; +// mod sub_i8_broadcast; +// mod sub_u32; +// mod sub_u32_broadcast; +// mod tanh_fp16x16; +// mod tanh_fp8x23; +// mod transpose_fp16x16_2d; +// mod transpose_fp16x16_3d; +// mod transpose_fp8x23_2d; +// mod transpose_fp8x23_3d; +// mod transpose_i32_2d; +// mod transpose_i32_3d; +// mod transpose_i8_2d; +// mod transpose_i8_3d; +// mod transpose_u32_2d; +// mod transpose_u32_3d; +// mod xor_fp16x16; +// mod xor_fp16x16_broadcast; +// mod xor_fp8x23; +// mod xor_fp8x23_broadcast; +// mod xor_i32; +// mod xor_i32_broadcast; +// mod xor_i8; +// mod xor_i8_broadcast; +// mod xor_u32; +// mod xor_u32_broadcast; +// mod less_fp16x16; +// mod less_fp16x16_broadcast; +// mod less_fp8x23; +// mod less_fp8x23_broadcast; +// mod less_i32; +// mod less_i32_broadcast; +// mod less_i8; +// mod less_i8_broadcast; +// mod less_u32; +// mod less_u32_broadcast; +// mod greater_equal_fp16x16; +// mod greater_equal_fp16x16_broadcast; +// mod greater_equal_fp8x23; +// mod greater_equal_fp8x23_broadcast; +// mod greater_equal_i32; +// mod greater_equal_i32_broadcast; +// mod greater_equal_i8; +// mod greater_equal_i8_broadcast; +// mod greater_equal_u32; +// mod greater_equal_u32_broadcast; +// mod slice_fp16x16_2d; +// mod slice_fp16x16_3d; +// mod slice_fp8x23_2d; +// mod 
slice_fp8x23_3d; +// mod slice_i32_2d; +// mod slice_i32_3d; +// mod slice_i8_2d; +// mod slice_i8_3d; +// mod slice_u32_2d; +// mod slice_u32_3d; +// mod gather_fp8x23_3d_default; +// mod gather_fp8x23_3d_axis1; +// mod gather_fp8x23_3d_axis2; +// mod gather_fp16x16_3d_default; +// mod gather_fp16x16_3d_axis1; +// mod gather_fp16x16_3d_axis2; +// mod gather_i8_3d_default; +// mod gather_i8_3d_axis1; +// mod gather_i8_3d_axis2; +// mod gather_i32_3d_default; +// mod gather_i32_3d_axis1; +// mod gather_i32_3d_axis2; +// mod gather_u32_3d_default; +// mod gather_u32_3d_axis1; +// mod gather_u32_3d_axis2; +// mod nonzero_fp16x16_2d; +// mod nonzero_fp16x16_3d; +// mod nonzero_fp8x23_2d; +// mod nonzero_fp8x23_3d; +// mod nonzero_i32_2d; +// mod nonzero_i32_3d; +// mod nonzero_i8_2d; +// mod nonzero_i8_3d; +// mod nonzero_u32_2d; +// mod nonzero_u32_3d; +// mod squeeze_fP16x16; +// mod squeeze_fP8x23; +// mod squeeze_i32; +// mod squeeze_i8; +// mod squeeze_u32; +// mod unsqueeze_fp16x16_2d; +// mod unsqueeze_fp16x16_3d; +// mod unsqueeze_fp8x23_2d; +// mod unsqueeze_fp8x23_3d; +// mod unsqueeze_i32_2d; +// mod unsqueeze_i32_3d; +// mod unsqueeze_i8_2d; +// mod unsqueeze_i8_3d; +// mod unsqueeze_u32_2d; +// mod unsqueeze_u32_3d; +// mod sign_fP16x16; +// mod sign_fP8x23; +// mod sign_fail; +// mod sign_i32; +// mod sign_i8; +// mod clip_fp16x16_2d; +// mod clip_fp16x16_3d; +// mod clip_fp8x23_2d; +// mod clip_fp8x23_3d; +// mod clip_i32_2d; +// mod clip_i32_3d; +// mod clip_i8_2d; +// mod clip_i8_3d; +// mod clip_u32_2d; +// mod clip_u32_3d; +// mod identity_fP16x16; +// mod identity_fP8x23; +// mod identity_i32; +// mod identity_i8; +// mod identity_u32; +// mod thresholded_relu_fp16x16; +// mod thresholded_relu_fp8x23; +// mod hard_sigmoid_fp8x23; +// mod hard_sigmoid_fp16x16; +// mod neg_fp16x16; +// mod neg_fp8x23; +// mod neg_i32; +// mod neg_i8; +// mod gemm_all_attributes; +// mod gemm_alpha; +// mod gemm_beta; +// mod gemm_default_matrix_bias; +// mod 
gemm_default_vector_bias; +// mod gemm_default_no_bias; +// mod gemm_transposeA; +// mod gemm_transposeB; +// mod min_fp16x16_three_tensors; +// mod min_fp16x16_broadcast_three_tensors; +// mod min_fp16x16_two_tensors; +// mod min_fp16x16_broadcast_two_tensors; +// mod min_fp8x23_three_tensors; +// mod min_fp8x23_broadcast_three_tensors; +// mod min_fp8x23_two_tensors; +// mod min_fp8x23_broadcast_two_tensors; +// mod min_i32_three_tensors; +// mod min_i32_broadcast_three_tensors; +// mod min_i32_two_tensors; +// mod min_i32_broadcast_two_tensors; +// mod min_i8_three_tensors; +// mod min_i8_broadcast_three_tensors; +// mod min_i8_two_tensors; +// mod min_i8_broadcast_two_tensors; +// mod min_u32_three_tensors; +// mod min_u32_broadcast_three_tensors; +// mod min_u32_two_tensors; +// mod min_u32_broadcast_two_tensors; +// mod where_fp16x16; +// mod where_fp16x16_broadcast; +// mod where_fp8x23; +// mod where_fp8x23_broadcast; +// mod where_i32; +// mod where_i32_broadcast; +// mod where_i8; +// mod where_i8_broadcast; +// mod where_u32; +// mod where_u32_broadcast; +// mod not_bool; +// mod round_fp16x16; +// mod round_fp8x23; +// mod max_fp16x16_three_tensors; +// mod max_fp16x16_broadcast_three_tensors; +// mod max_fp16x16_two_tensors; +// mod max_fp16x16_broadcast_two_tensors; +// mod max_fp8x23_three_tensors; +// mod max_fp8x23_broadcast_three_tensors; +// mod max_fp8x23_two_tensors; +// mod max_fp8x23_broadcast_two_tensors; +// mod max_i32_three_tensors; +// mod max_i32_broadcast_three_tensors; +// mod max_i32_two_tensors; +// mod max_i32_broadcast_two_tensors; +// mod max_i8_three_tensors; +// mod max_i8_broadcast_three_tensors; +// mod max_i8_two_tensors; +// mod max_i8_broadcast_two_tensors; +// mod max_u32_three_tensors; +// mod max_u32_broadcast_three_tensors; +// mod max_u32_two_tensors; +// mod max_u32_broadcast_two_tensors; +// mod scatter_fp16x16_3d_default; +// mod scatter_fp16x16_3d_axis1; +// mod scatter_fp16x16_3d_axis1_add; +// mod 
scatter_fp8x23_default; +// mod scatter_fp8x23_axis1; +// mod scatter_fp8x23_mul; +// mod scatter_i8_default; +// mod scatter_i8_axis1; +// mod scatter_i8_axis1_max; +// mod scatter_u32_default; +// mod scatter_u32_axis1; +// mod scatter_u32_add; +// mod array_feature_extractor_1D_i32; +// mod array_feature_extractor_1D_fp8x23; +// mod array_feature_extractor_1D_fp16x16; +// mod array_feature_extractor_2D_i32; +// mod array_feature_extractor_2D_fp8x23; +// mod array_feature_extractor_2D_fp16x16; +// mod array_feature_extractor_3D_i32; +// mod array_feature_extractor_3D_fp8x23; +// mod array_feature_extractor_3D_fp16x16; +// mod binarizer_fp16x16; +// mod binarizer_fp8x23; +// mod tril_fp16x16; +// mod tril_fp16x16_neg; +// mod tril_fp16x16_one_row; +// mod tril_fp16x16_out_neg; +// mod tril_fp16x16_out_pos; +// mod tril_fp16x16_pos; +// mod tril_fp16x16_square; +// mod tril_fp16x16_square_neg; +// mod tril_fp16x16_zero; +// mod triu_fp16x16; +// mod triu_fp16x16_neg; +// mod triu_fp16x16_one_row; +// mod triu_fp16x16_out_neg; +// mod triu_fp16x16_out_pos; +// mod triu_fp16x16_pos; +// mod triu_fp16x16_square; +// mod triu_fp16x16_square_neg; +// mod triu_fp16x16_zero; +// mod tril_fp8x23; +// mod tril_fp8x23_neg; +// mod tril_fp8x23_one_row; +// mod tril_fp8x23_out_neg; +// mod tril_fp8x23_out_pos; +// mod tril_fp8x23_pos; +// mod tril_fp8x23_square; +// mod tril_fp8x23_square_neg; +// mod tril_fp8x23_zero; +// mod triu_fp8x23; +// mod triu_fp8x23_neg; +// mod triu_fp8x23_one_row; +// mod triu_fp8x23_out_neg; +// mod triu_fp8x23_out_pos; +// mod triu_fp8x23_pos; +// mod triu_fp8x23_square; +// mod triu_fp8x23_square_neg; +// mod triu_fp8x23_zero; +// mod tril_i32; +// mod tril_neg_i32; +// mod tril_i32_one_row; +// mod tril_i32_out_neg; +// mod tril_i32_out_pos; +// mod tril_i32_pos; +// mod tril_i32_square; +// mod tril_i32_square_neg; +// mod tril_i32_zero; +// mod triu_i32; +// mod triu_i32_neg; +// mod triu_i32_one_row; +// mod triu_i32_out_neg; +// mod 
triu_i32_out_pos; +// mod triu_i32_pos; +// mod triu_i32_square; +// mod triu_i32_square_neg; +// mod triu_i32_zero; +// mod tril_i8; +// mod tril_i8_neg; +// mod tril_i8_one_row; +// mod tril_i8_out_neg; +// mod tril_i8_out_pos; +// mod tril_i8_pos; +// mod tril_i8_square; +// mod tril_i8_square_neg; +// mod tril_i8_zero; +// mod triu_i8; +// mod triu_i8_neg; +// mod triu_i8_one_row; +// mod triu_i8_out_neg; +// mod triu_i8_out_pos; +// mod triu_i8_pos; +// mod triu_i8_square; +// mod triu_i8_square_neg; +// mod triu_i8_zero; +// mod tril_u32; +// mod tril_u32_neg; +// mod tril_u32_one_row; +// mod tril_u32_out_neg; +// mod tril_u32_out_pos; +// mod tril_u32_pos; +// mod tril_u32_square; +// mod tril_u32_square_neg; +// mod tril_u32_zero; +// mod triu_u32; +// mod triu_u32_neg; +// mod triu_u32_one_row; +// mod triu_u32_out_neg; +// mod triu_u32_out_pos; +// mod triu_u32_pos; +// mod triu_u32_square; +// mod triu_u32_square_neg; +// mod triu_u32_zero; +// mod reduce_sum_square_fp16x16_export_do_not_keepdims; +// mod reduce_sum_square_fp16x16_export_keepdims; +// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +// mod reduce_sum_square_fp8x23_export_do_not_keepdims; +// mod reduce_sum_square_fp8x23_export_keepdims; +// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +// mod reduce_sum_square_i32_export_do_not_keepdims; +// mod reduce_sum_square_i32_export_keepdims; +// mod reduce_sum_square_i32_export_negative_axes_keepdims; +// mod reduce_sum_square_i8_export_do_not_keepdims; +// mod reduce_sum_square_i8_export_keepdims; +// mod reduce_sum_square_i8_export_negative_axes_keepdims; +// mod reduce_sum_square_u32_export_do_not_keepdims; +// mod reduce_sum_square_u32_export_keepdims; +// mod reduce_sum_square_u32_export_negative_axes_keepdims; +// mod reduce_l2_fp16x16_export_do_not_keepdims; +// mod reduce_l2_fp16x16_export_keepdims; +// mod reduce_l2_fp16x16_export_negative_axes_keepdims; +// mod reduce_l2_fp8x23_export_do_not_keepdims; +// 
mod reduce_l2_fp8x23_export_keepdims; +// mod reduce_l2_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_fp16x16_export_do_not_keepdims; +// mod reduce_l1_fp16x16_export_keepdims; +// mod reduce_l1_fp16x16_export_negative_axes_keepdims; +// mod reduce_l1_fp8x23_export_do_not_keepdims; +// mod reduce_l1_fp8x23_export_keepdims; +// mod reduce_l1_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_i32_export_do_not_keepdims; +// mod reduce_l1_i32_export_keepdims; +// mod reduce_l1_i32_export_negative_axes_keepdims; +// mod reduce_l1_i8_export_do_not_keepdims; +// mod reduce_l1_i8_export_keepdims; +// mod reduce_l1_i8_export_negative_axes_keepdims; +// mod reduce_l1_u32_export_do_not_keepdims; +// mod reduce_l1_u32_export_keepdims; +// mod reduce_l1_u32_export_negative_axes_keepdims; +// mod reduce_prod_fp16x16_1D; +// mod reduce_prod_fp16x16_2D_default; +// mod reduce_prod_fp16x16_2D_keepdims; +// mod reduce_prod_fp16x16_2D_axis_1; +// mod reduce_prod_fp8x23_1D; +// mod reduce_prod_fp8x23_2D_default; +// mod reduce_prod_fp8x23_2D_keepdims; +// mod reduce_prod_fp8x23_2D_axis_1; +// mod reduce_prod_i32_1D; +// mod reduce_prod_i32_2D_default; +// mod reduce_prod_i32_2D_keepdims; +// mod reduce_prod_i32_2D_axis_1; +// mod reduce_prod_i8_1D; +// mod reduce_prod_i8_2D_default; +// mod reduce_prod_i8_2D_keepdims; +// mod reduce_prod_i8_2D_axis_1; +// mod reduce_prod_u32_1D; +// mod reduce_prod_u32_2D_default; +// mod reduce_prod_u32_2D_keepdims; +// mod reduce_prod_u32_2D_axis_1; +// mod gather_elements_fp16x16_3d_default; +// mod gather_elements_fp16x16_3d_axis1; +// mod gather_elements_fp16x16_3d_axis2; +// mod gather_elements_fp8x23_3d_default; +// mod gather_elements_fp8x23_3d_axis1; +// mod gather_elements_fp8x23_3d_axis2; +// mod gather_elements_i8_3d_default; +// mod gather_elements_i8_3d_axis1; +// mod gather_elements_i32_3d_default; +// mod gather_elements_i32_3d_axis1; +// mod gather_elements_i32_3d_axis2; +// mod gather_elements_u32_default; +// mod 
gather_elements_u32_axis1; +// mod gather_elements_u32_axis2; +// mod gather_elements_u32_axis3; +// mod sequence_length_fp16x16; +// mod sequence_length_fp16x16_broadcast; +// mod sequence_length_fp8x23; +// mod sequence_length_fp8x23_broadcast; +// mod sequence_length_i32; +// mod sequence_length_i32_broadcast; +// mod sequence_length_i8; +// mod sequence_length_i8_broadcast; +// mod sequence_length_u32; +// mod sequence_length_u32_broadcast; +// mod sequence_at_u32_positive; +// mod sequence_at_u32_negative; +// mod sequence_at_fp16x16_positive; +// mod sequence_at_fp16x16_negative; +// mod sequence_at_fp8x23_positive; +// mod sequence_at_fp8x23_negative; +// mod sequence_at_i32_positive; +// mod sequence_at_i32_negative; +// mod sequence_at_i8_positive; +// mod sequence_at_i8_negative; +// mod reduce_min_fp16x16_1D; +// mod reduce_min_fp16x16_2D_default; +// mod reduce_min_fp16x16_2D_keepdims; +// mod reduce_min_fp16x16_2D_axis_1; +// mod reduce_min_fp8x23_1D; +// mod reduce_min_fp8x23_2D_default; +// mod reduce_min_fp8x23_2D_keepdims; +// mod reduce_min_fp8x23_2D_axis_1; +// mod reduce_min_i32_1D; +// mod reduce_min_i32_2D_default; +// mod reduce_min_i32_2D_keepdims; +// mod reduce_min_i32_2D_axis_1; +// mod reduce_min_i8_1D; +// mod reduce_min_i8_2D_default; +// mod reduce_min_i8_2D_keepdims; +// mod reduce_min_i8_2D_axis_1; +// mod reduce_min_u32_1D; +// mod reduce_min_u32_2D_default; +// mod reduce_min_u32_2D_keepdims; +// mod reduce_min_u32_2D_axis_1; +// mod sequence_construct_fp16x16; +// mod sequence_construct_fp8x23; +// mod sequence_construct_i32; +// mod sequence_construct_i8; +// mod sequence_construct_u32; +// mod shrink_hard_fp16x16; +// mod shrink_soft_fp16x16; +// mod shrink_hard_fp8x23; +// mod shrink_soft_fp8x23; +// mod sequence_empty_fp16x16; +// mod sequence_empty_fp8x23; +// mod sequence_empty_i32; +// mod sequence_empty_i8; +// mod sequence_empty_u32; +// mod reduce_mean_fp16x16_1D; +// mod reduce_mean_fp16x16_2D_default; +// mod 
reduce_mean_fp16x16_2D_keepdims; +// mod reduce_mean_fp16x16_2D_axis_1; +// mod reduce_mean_fp8x23_1D; +// mod reduce_mean_fp8x23_2D_default; +// mod reduce_mean_fp8x23_2D_keepdims; +// mod reduce_mean_fp8x23_2D_axis_1; +// mod reduce_mean_i32_1D; +// mod reduce_mean_i32_2D_default; +// mod reduce_mean_i32_2D_keepdims; +// mod reduce_mean_i32_2D_axis_1; +// mod reduce_mean_i8_1D; +// mod reduce_mean_i8_2D_default; +// mod reduce_mean_i8_2D_keepdims; +// mod reduce_mean_i8_2D_axis_1; +// mod reduce_mean_u32_1D; +// mod reduce_mean_u32_2D_default; +// mod reduce_mean_u32_2D_keepdims; +// mod reduce_mean_u32_2D_axis_1; +// mod pow_fp16x16; +// mod pow_fp16x16_broadcast; +// mod pow_fp8x23; +// mod pow_fp8x23_broadcast; +// mod sequence_erase_u32_positive; +// mod sequence_erase_u32_negative; +// mod sequence_erase_u32_empty; +// mod sequence_erase_fp16x16_positive; +// mod sequence_erase_fp16x16_negative; +// mod sequence_erase_fp16x16_empty; +// mod sequence_erase_fp8x23_positive; +// mod sequence_erase_fp8x23_negative; +// mod sequence_erase_fp8x23_empty; +// mod sequence_erase_i32_positive; +// mod sequence_erase_i32_negative; +// mod sequence_erase_i32_empty; +// mod sequence_erase_i8_positive; +// mod sequence_erase_i8_negative; +// mod sequence_erase_i8_empty; +// mod sequence_insert_fp16x16; +// mod sequence_insert_fp8x23; +// mod sequence_insert_i32; +// mod sequence_insert_i8; +// mod sequence_insert_u32; +// mod concat_from_sequence_fp8x23_new_axis_zero; +// mod concat_from_sequence_fp8x23_new_axis_one; +// mod concat_from_sequence_fp8x23_new_axis_default; +// mod concat_from_sequence_fp16x16_new_axis_zero; +// mod concat_from_sequence_fp16x16_new_axis_one; +// mod concat_from_sequence_fp16x16_new_axis_default; +// mod concat_from_sequence_i32_new_axis_zero; +// mod concat_from_sequence_i32_new_axis_one; +// mod concat_from_sequence_i32_new_axis_default; +// mod concat_from_sequence_i8_new_axis_zero; +// mod concat_from_sequence_i8_new_axis_one; +// mod 
concat_from_sequence_i8_new_axis_default; +// mod concat_from_sequence_u32_new_axis_zero; +// mod concat_from_sequence_u32_new_axis_one; +// mod concat_from_sequence_u32_new_axis_default; +// mod is_nan_fp16x16; +// mod is_nan_fp8x23; +// mod is_inf_fp16x16; +// mod is_inf_fp8x23; +// mod is_inf_i32; +// mod is_inf_i8; +// mod is_inf_u32; +// mod is_pos_inf_fp16x16; +// mod is_neg_inf_fp16x16; +// mod is_pos_inf_fp8x23; +// mod is_neg_inf_fp8x23; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; +// mod is_pos_inf_i8; +// mod is_neg_inf_i8; +// mod reduce_log_sum_fp8x23_export_do_not_keepdims; +// mod reduce_log_sum_fp8x23_export_keepdims; +// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +// mod reduce_log_sum_fp16x16_export_do_not_keepdims; +// mod reduce_log_sum_fp16x16_export_keepdims; +// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +// mod and_bool; +// mod erf_fp16x16; +// mod erf_fp8x23; +// mod unique_fp16x16_without_axis_sorted; +// mod unique_fp16x16_with_axis_zero_sorted; +// mod unique_u32_without_axis_sorted; +// mod unique_u32_without_axis_not_sorted; +// mod unique_u32_with_axis_zero_sorted; +// mod unique_u32_with_axis_zero_not_sorted; +// mod unique_u32_with_axis_one_sorted; +// mod unique_u32_with_axis_one_not_sorted; +// mod gather_nd_fp16x16_3d_default; +// mod gather_nd_fp16x16_3d_batch_dims1; +// mod gather_nd_fp16x16_3d_batch_dims2; +// mod gather_nd_fp8x23_3d_default; +// mod gather_nd_fp8x23_3d_batch_dims1; +// mod gather_nd_fp8x23_3d_batch_dims2; +// mod gather_nd_i32_3d_default; +// mod gather_nd_i32_3d_batch_dims1; +// mod gather_nd_i32_3d_batch_dims2; +// mod gather_nd_i8_3d_default; +// mod gather_nd_i8_3d_batch_dims1; +// mod gather_nd_u32_default; +// mod gather_nd_u32_batch_dims1; +// mod gather_nd_u32_batch_dims2; +// mod resize_upsample_scales_nearest; +// mod resize_downsample_scales_cubic; +// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_downsample_scales_cubic_align_corners; 
+// mod resize_upsample_scales_linear; +// mod resize_downsample_scales_linear_align_corners; +// mod resize_downsample_scales_nearest; +// mod resize_upsample_scales_cubic; +// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_upsample_scales_cubic_align_corners; +// mod resize_upsample_scales_cubic_asymmetric; +// mod resize_upsample_scales_linear_align_corners; +// mod resize_upsample_sizes_nearest; +// mod resize_upsample_sizes_cubic; +// mod resize_downsample_sizes_cubic; +// mod resize_downsample_sizes_nearest; +// mod resize_upsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_scales_cubic_antialias; +// mod resize_downsample_scales_linear_antialias; +// mod resize_downsample_sizes_cubic_antialias; +// mod resize_downsample_sizes_linear_pytorch_half_pixel; +// mod resize_tf_crop_and_resize; +// mod resize_tf_crop_and_resize_extrapolation_value; +// mod resize_upsample_scales_nearest_axes_2_3; +// mod resize_upsample_scales_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_2_3; +// mod resize_upsample_sizes_nearest_ceil_half_pixel; +// mod resize_upsample_sizes_nearest_floor_align_corners; +// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +// mod resize_downsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_sizes_nearest_not_larger; +// mod resize_downsample_sizes_nearest_not_smaller; +// mod resize_tf_crop_and_resize_axes_2_3; +// mod resize_tf_crop_and_resize_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_not_larger; +// mod resize_upsample_sizes_nearest_not_smaller; +// mod compress_fp16x16_3d_default; +// mod compress_fp16x16_3d_axis1; +// mod compress_fp16x16_3d_axis2; +// mod compress_fp16x16_3d_axis3; +// mod compress_fp16x16_3d_noaxis; +// mod compress_fp8x23_3d_default; +// mod compress_fp8x23_3d_axis1; +// mod compress_fp8x23_3d_axis2; +// mod compress_i32_3d_default; +// mod compress_i32_3d_axis1; +// mod 
compress_i32_3d_axis2; +// mod compress_i8_3d_default; +// mod compress_i8_3d_axis1; +// mod compress_i8_3d_axis2; +// mod compress_u32_3d_default; +// mod compress_u32_3d_axis1; +// mod compress_u32_3d_axis2; +// mod compress_u32_3d_axis2_2; +// mod compress_u32_3d_axis3; +// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +// mod layer_normalization_default_axis; +// mod layer_normalization_4d_axis0; +// mod layer_normalization_4d_axis1; +// mod layer_normalization_4d_axis2; +// mod layer_normalization_4d_axis3; +// mod layer_normalization_3d_axis0_epsilon; +// mod layer_normalization_3d_axis_negative_3_epsilon; +// mod layer_normalization_3d_axis1_epsilon; +// mod layer_normalization_3d_axis2_epsilon; +// mod layer_normalization_4d_axis_negative_4; +// mod layer_normalization_4d_axis_negative_3; +// mod layer_normalization_4d_axis_negative_2; +// mod layer_normalization_4d_axis_negative_1; +// mod layer_normalization_3d_axis_negative_2_epsilon; +// mod layer_normalization_3d_axis_negative_1_epsilon; +// mod layer_normalization_test; +// mod split_u32_1d_equal_parts; +// mod split_u32_2d_equal_parts; +// mod split_u32_zero_size; +// mod split_u32_1d_variable_parts; +// mod split_u32_2d_variable_parts; +// mod split_u32_1d_uneven; +// mod split_u32_2d_uneven; +// mod split_fp16x16_1d_equal_parts; +// mod split_fp16x16_1d_variable_parts; +// mod split_fp16x16_2d_equal_parts; +// mod split_fp16x16_2d_variable_parts; +// mod split_fp16x16_zero_size; +// mod split_fp16x16_1d_uneven; +// mod split_fp16x16_2d_uneven; +// mod grid_sample; +// mod grid_sample_cubic; +// mod grid_sample_aligncorners; +// mod grid_sample_nearest; +// mod grid_sample_nearest_aligncorner; +// mod grid_sample_padding_border; +// mod grid_sample_padding_reflection; +// mod grid_sample_padding_zeros; +// mod col2im; +// mod col2im_5D; +// mod col2im_dilations; +// mod 
col2im_pads; +// mod col2im_strides; +// mod random_uniform_like_fp16x16; +// mod random_uniform_like_fp8x23; +// mod range_fp8x23; +// mod range_fp16x16; +// mod range_i32; +// mod range_i8; +// mod range_u32; +// mod hann_window_fp8x23; +// mod hann_window_fp16x16; +// mod hamming_window_fp16x16; +// mod hamming_window_fp8x23; +// mod blackman_window_fp16x16; +// mod blackman_window_fp8x23; +// mod split_to_sequence_fp16x16_1d_equal_parts; +// mod split_to_sequence_fp16x16_1d_variable_parts; +// mod split_to_sequence_fp16x16_2d_equal_parts; +// mod split_to_sequence_fp16x16_2d_variable_parts; +// mod split_to_sequence_fp16x16_zero_size; +// mod split_to_sequence_fp16x16_1d_uneven; +// mod split_to_sequence_fp16x16_2d_uneven; +// mod split_to_sequence_u32_1d_equal_parts; +// mod split_to_sequence_u32_1d_variable_parts; +// mod split_to_sequence_u32_2d_equal_parts; +// mod split_to_sequence_u32_2d_variable_parts; +// mod split_to_sequence_u32_zero_size; +// mod split_to_sequence_u32_1d_uneven; +// mod split_to_sequence_u32_2d_uneven; +// mod split_to_sequence_2d_scalar; +// mod split_to_sequence_2d_nokeepdims; +// mod split_to_sequence_1d_nokeepdims; +// mod reverse_sequence_fp16x16_batch_equal_parts; +// mod reverse_sequence_fp16x16_time_equal_parts; +// mod reverse_sequence_i32_batch_equal_parts; +// mod reverse_sequence_i32_time_equal_parts; +// mod reverse_sequence_i8_batch_equal_parts; +// mod reverse_sequence_i8_time_equal_parts; +// mod reverse_sequence_u32_4x4_batch; +// mod reverse_sequence_u32_4x4_time; +// mod reverse_sequence_u32_3x3_batch; +// mod reverse_sequence_u32_3x3_time; +// mod reverse_sequence_different_dimensions_4_5; +// mod reverse_sequence_different_dimensions_2_4; +// mod reverse_sequence_different_dimensions_1_6; +// mod reverse_sequence_different_dimensions_3x9_batch; +// mod reverse_sequence_different_dimensions_3x9_time; +// mod conv_transpose; +// mod conv_transpose_1d; +// mod conv_transpose_3d; +// mod conv_transpose_attributes; 
+// mod conv_transpose_autopad_same; +// mod conv_transpose_dilations; +// mod conv_transpose_pads; +// mod conv_transpose_group_2; +// mod conv_transpose_group_2_image_3; +// mod depth_to_space_fp16x16; +// mod depth_to_space_fp8x23; +// mod depth_to_space_i32; +// mod depth_to_space_i8; +// mod depth_to_space_u32; +// mod space_to_depth_fp16x16; +// mod space_to_depth_fp8x23; +// mod space_to_depth_i32; +// mod space_to_depth_i8; +// mod space_to_depth_u32; +// mod scatter_nd_fp16x16_3d_default; +// mod scatter_nd_fp16x16_3d_add; +// mod scatter_nd_fp16x16_3d_mul; +// mod scatter_nd_fp16x16_3d_max; +// mod scatter_nd_fp16x16_3d_min; +// mod scatter_nd_fp8x23_3d_default; +// mod scatter_nd_fp8x23_3d_add; +// mod scatter_nd_fp8x23_3d_mul; +// mod scatter_nd_fp8x23_3d_max; +// mod scatter_nd_fp8x23_3d_min; +// mod scatter_nd_u32_default; +// mod scatter_nd_u32_add; +// mod scatter_nd_u32_mul; +// mod scatter_nd_u32_max; +// mod scatter_nd_u32_min; +// mod conv_2D_with_padding; +// mod conv_1D_no_padding; +// mod conv_1D_with_padding; +// mod conv_3D_no_padding; +// mod conv_3D_with_padding; +// mod conv_4D_no_padding; +// mod conv_2D_with_2_groups; +// mod conv_2D_with_autopad_same; +// mod conv_2D_with_strides_asymmetric_padding; +// mod conv_2D_with_strides_with_padding; +// mod conv_4D_with_padding; +// mod label_encoder_fp16x16_3d_default; +// mod label_encoder_fp8x23_default; +// mod label_encoder_i8_default; +// mod label_encoder_i32_default; +// mod label_encoder_u32_default; From 5b972156be90b3cbe5b02d01f3bf569da3d42e9d Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 17:01:41 +0100 Subject: [PATCH 34/68] test gather --- nodegen/node/gather.py | 329 ++++-------------- tests/nodes.cairo | 20 +- tests/nodes/gather_fp16x16_3d_axis1.cairo | 10 +- .../gather_fp16x16_3d_axis1/input_1.cairo | 4 +- tests/nodes/gather_fp16x16_3d_axis2.cairo | 10 +- .../gather_fp16x16_3d_axis2/input_1.cairo | 4 +- tests/nodes/gather_fp16x16_3d_default.cairo | 10 +- 
.../gather_fp16x16_3d_default/input_1.cairo | 4 +- tests/nodes/gather_fp8x23_3d_axis1.cairo | 24 -- .../gather_fp8x23_3d_axis1/input_0.cairo | 41 --- .../gather_fp8x23_3d_axis1/input_1.cairo | 19 - .../gather_fp8x23_3d_axis1/output_0.cairo | 69 ---- tests/nodes/gather_fp8x23_3d_axis2.cairo | 24 -- .../gather_fp8x23_3d_axis2/input_0.cairo | 41 --- .../gather_fp8x23_3d_axis2/input_1.cairo | 19 - .../gather_fp8x23_3d_axis2/output_0.cairo | 69 ---- tests/nodes/gather_fp8x23_3d_default.cairo | 24 -- .../gather_fp8x23_3d_default/input_0.cairo | 41 --- .../gather_fp8x23_3d_default/output_0.cairo | 69 ---- tests/nodes/gather_i32_3d_axis1.cairo | 24 -- tests/nodes/gather_i32_3d_axis1/input_0.cairo | 41 --- .../nodes/gather_i32_3d_axis1/output_0.cairo | 69 ---- tests/nodes/gather_i32_3d_axis2/input_0.cairo | 41 --- tests/nodes/gather_i32_3d_axis2/input_1.cairo | 19 - .../nodes/gather_i32_3d_axis2/output_0.cairo | 69 ---- .../nodes/gather_i32_3d_default/input_0.cairo | 41 --- .../nodes/gather_i32_3d_default/input_1.cairo | 19 - .../gather_i32_3d_default/output_0.cairo | 69 ---- tests/nodes/gather_i8_3d_axis1.cairo | 24 -- tests/nodes/gather_i8_3d_axis1/input_0.cairo | 41 --- tests/nodes/gather_i8_3d_axis1/input_1.cairo | 19 - tests/nodes/gather_i8_3d_axis1/output_0.cairo | 69 ---- tests/nodes/gather_i8_3d_axis2.cairo | 24 -- tests/nodes/gather_i8_3d_axis2/input_0.cairo | 41 --- tests/nodes/gather_i8_3d_axis2/input_1.cairo | 19 - tests/nodes/gather_i8_3d_axis2/output_0.cairo | 69 ---- tests/nodes/gather_i8_3d_default.cairo | 24 -- .../nodes/gather_i8_3d_default/input_0.cairo | 41 --- .../nodes/gather_i8_3d_default/input_1.cairo | 19 - .../nodes/gather_i8_3d_default/output_0.cairo | 69 ---- ...fault.cairo => gather_negative_axis.cairo} | 12 +- .../nodes/gather_negative_axis/input_0.cairo | 41 +++ .../input_1.cairo | 4 +- .../nodes/gather_negative_axis/output_0.cairo | 69 ++++ ...s2.cairo => gather_negative_indices.cairo} | 12 +- .../gather_negative_indices/input_0.cairo | 22 ++ 
.../input_1.cairo | 12 +- .../gather_negative_indices/output_0.cairo | 15 + tests/nodes/gather_u32_3d_axis1.cairo | 22 -- tests/nodes/gather_u32_3d_axis1/input_0.cairo | 50 --- tests/nodes/gather_u32_3d_axis1/input_1.cairo | 19 - .../nodes/gather_u32_3d_axis1/output_0.cairo | 69 ---- tests/nodes/gather_u32_3d_axis2.cairo | 22 -- tests/nodes/gather_u32_3d_axis2/input_0.cairo | 50 --- tests/nodes/gather_u32_3d_axis2/input_1.cairo | 19 - .../nodes/gather_u32_3d_axis2/output_0.cairo | 87 ----- tests/nodes/gather_u32_3d_default.cairo | 22 -- .../nodes/gather_u32_3d_default/input_0.cairo | 50 --- .../nodes/gather_u32_3d_default/input_1.cairo | 19 - .../gather_u32_3d_default/output_0.cairo | 87 ----- 60 files changed, 268 insertions(+), 2117 deletions(-) delete mode 100644 tests/nodes/gather_fp8x23_3d_axis1.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis1/input_1.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis2.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_default.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_default/input_0.cairo delete mode 100644 tests/nodes/gather_fp8x23_3d_default/output_0.cairo delete mode 100644 tests/nodes/gather_i32_3d_axis1.cairo delete mode 100644 tests/nodes/gather_i32_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_i32_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_i32_3d_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_i32_3d_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_i32_3d_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_i32_3d_default/input_0.cairo delete mode 100644 
tests/nodes/gather_i32_3d_default/input_1.cairo delete mode 100644 tests/nodes/gather_i32_3d_default/output_0.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis1.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis1/input_1.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis2.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_i8_3d_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_i8_3d_default.cairo delete mode 100644 tests/nodes/gather_i8_3d_default/input_0.cairo delete mode 100644 tests/nodes/gather_i8_3d_default/input_1.cairo delete mode 100644 tests/nodes/gather_i8_3d_default/output_0.cairo rename tests/nodes/{gather_i32_3d_default.cairo => gather_negative_axis.cairo} (67%) create mode 100644 tests/nodes/gather_negative_axis/input_0.cairo rename tests/nodes/{gather_fp8x23_3d_default => gather_negative_axis}/input_1.cairo (83%) create mode 100644 tests/nodes/gather_negative_axis/output_0.cairo rename tests/nodes/{gather_i32_3d_axis2.cairo => gather_negative_indices.cairo} (67%) create mode 100644 tests/nodes/gather_negative_indices/input_0.cairo rename tests/nodes/{gather_i32_3d_axis1 => gather_negative_indices}/input_1.cairo (59%) create mode 100644 tests/nodes/gather_negative_indices/output_0.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis1.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis1/input_1.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis2.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_u32_3d_axis2/output_0.cairo 
delete mode 100644 tests/nodes/gather_u32_3d_default.cairo delete mode 100644 tests/nodes/gather_u32_3d_default/input_0.cairo delete mode 100644 tests/nodes/gather_u32_3d_default/input_1.cairo delete mode 100644 tests/nodes/gather_u32_3d_default/output_0.cairo diff --git a/nodegen/node/gather.py b/nodegen/node/gather.py index 5ba2692fb..ed2317555 100644 --- a/nodegen/node/gather.py +++ b/nodegen/node/gather.py @@ -7,258 +7,83 @@ class Gather(RunAll): @staticmethod def gather_fp16x16(): - def gather_3D(): - def default(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32) - y = x1.take(x2, axis=0) - - x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "gather_fp16x16_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) - y = x1.take(x2, axis=1) - - x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "gather_fp16x16_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) - y = x1.take(x2, axis=2) - - x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "gather_fp16x16_3d_axis2" - make_test( - inputs = 
[x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_3D() - - @staticmethod - def gather_fp8x23(): + def default(): + x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) + x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32) + y = x1.take(x2, axis=0) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_fp16x16_3d_default" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", + name= name) - def gather_3D(): - def default(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) - y = x1.take(x2, axis=0) - - x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) - - name = "gather_fp8x23_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) - y = x1.take(x2, axis=1) - - x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) - - name = "gather_fp8x23_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) - y = x1.take(x2, axis=2) - - x1 = Tensor(Dtype.FP8x23, x1.shape, 
to_fp(x1.flatten(), FixedImpl.FP8x23)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) - - name = "gather_fp8x23_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_3D() - - @staticmethod - def gather_i8(): + def axis1(): + x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) + x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) + y = x1.take(x2, axis=1) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_fp16x16_3d_axis1" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))", + name= name) - def gather_3D(): - def default(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int8) - y = x1.take(x2, axis=0) - - x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I8, y.shape, y.flatten()) - - name = "gather_i8_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int8) - y = x1.take(x2, axis=1) - - x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I8, y.shape, y.flatten()) - - name = "gather_i8_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int8) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int8) 
- y = x1.take(x2, axis=2) - - x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I8, y.shape, y.flatten()) - - name = "gather_i8_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_3D() - - - @staticmethod - def gather_i32(): + def axis2(): + x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) + x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int64) + y = x1.take(x2, axis=2) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_fp16x16_3d_axis2" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", + name= name) + + def negative_indices(): + x1 = np.arange(10).astype(np.float32) + x2 = np.array([0, -9, -10]).astype(np.int64) + y = np.take(x1, x2, axis=0) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_negative_indices" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", + name= name) + + def negative_axis(): + x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) + x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32) + y = x1.take(x2, axis=-1) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_negative_axis" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(-1))", + name= 
name) - def gather_3D(): - def default(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int32) - y = x1.take(x2, axis=0) - - x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "gather_i32_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int32) - y = x1.take(x2, axis=1) - - x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "gather_i32_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int32) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.int32) - y = x1.take(x2, axis=2) - - x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "gather_i32_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_3D() - - @staticmethod - def gather_u32(): - - def gather_3D(): - def default(): - x1 = np.arange(0,36).reshape(3,4,3).astype(np.uint32) - x2 = np.array([[0,1], [2,1], [0, 2]]).astype(np.uint32) - y = x1.take(x2, axis=0) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "gather_u32_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(0))", - name= name) - - def 
axis1(): - x1 = np.arange(0,36).reshape(3,4,3).astype(np.uint32) - x2 = np.array([[0,1], [2,1], [1, 3]]).astype(np.uint32) - y = x1.take(x2, axis=1) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "gather_u32_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,36).reshape(3,4,3).astype(np.uint32) - x2 = np.array([[0,1], [2,1], [1, 2]]).astype(np.uint32) - y = x1.take(x2, axis=2) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "gather_u32_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_3D() + default() + axis1() + axis2() + negative_indices() + negative_axis() diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 09ab341e9..1f37081b1 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -378,21 +378,6 @@ // mod slice_i8_3d; // mod slice_u32_2d; // mod slice_u32_3d; -// mod gather_fp8x23_3d_default; -// mod gather_fp8x23_3d_axis1; -// mod gather_fp8x23_3d_axis2; -// mod gather_fp16x16_3d_default; -// mod gather_fp16x16_3d_axis1; -// mod gather_fp16x16_3d_axis2; -// mod gather_i8_3d_default; -// mod gather_i8_3d_axis1; -// mod gather_i8_3d_axis2; -// mod gather_i32_3d_default; -// mod gather_i32_3d_axis1; -// mod gather_i32_3d_axis2; -// mod gather_u32_3d_default; -// mod gather_u32_3d_axis1; -// mod gather_u32_3d_axis2; // mod nonzero_fp16x16_2d; // mod nonzero_fp16x16_3d; // mod nonzero_fp8x23_2d; @@ -1047,3 +1032,8 @@ // mod label_encoder_i8_default; // mod label_encoder_i32_default; // mod label_encoder_u32_default; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod 
gather_fp16x16_3d_axis2; +mod gather_negative_indices; +mod gather_negative_axis; diff --git a/tests/nodes/gather_fp16x16_3d_axis1.cairo b/tests/nodes/gather_fp16x16_3d_axis1.cairo index 429d085d4..d10ab5245 100644 --- a/tests/nodes/gather_fp16x16_3d_axis1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis1.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::operators::tensor::FP16x16TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis1() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); + let y_0 = input_0.gather(indices:input_1, axis:Option::Some(1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis1/input_1.cairo b/tests/nodes/gather_fp16x16_3d_axis1/input_1.cairo index b30e2f0c2..d5bbc25be 100644 --- a/tests/nodes/gather_fp16x16_3d_axis1/input_1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis1/input_1.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(2); diff --git a/tests/nodes/gather_fp16x16_3d_axis2.cairo 
b/tests/nodes/gather_fp16x16_3d_axis2.cairo index cfb8a61d2..40ef5691d 100644 --- a/tests/nodes/gather_fp16x16_3d_axis2.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis2.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::operators::tensor::FP16x16TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_axis2() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); + let y_0 = input_0.gather(indices:input_1, axis:Option::Some(2)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_axis2/input_1.cairo b/tests/nodes/gather_fp16x16_3d_axis2/input_1.cairo index b30e2f0c2..d5bbc25be 100644 --- a/tests/nodes/gather_fp16x16_3d_axis2/input_1.cairo +++ b/tests/nodes/gather_fp16x16_3d_axis2/input_1.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(2); diff --git a/tests/nodes/gather_fp16x16_3d_default.cairo b/tests/nodes/gather_fp16x16_3d_default.cairo index ee49aac75..2003b0838 100644 --- a/tests/nodes/gather_fp16x16_3d_default.cairo +++ 
b/tests/nodes/gather_fp16x16_3d_default.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::operators::tensor::FP16x16TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] @@ -18,7 +18,7 @@ fn test_gather_fp16x16_3d_default() { let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); + let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_fp16x16_3d_default/input_1.cairo b/tests/nodes/gather_fp16x16_3d_default/input_1.cairo index b30e2f0c2..d5bbc25be 100644 --- a/tests/nodes/gather_fp16x16_3d_default/input_1.cairo +++ b/tests/nodes/gather_fp16x16_3d_default/input_1.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(2); diff --git a/tests/nodes/gather_fp8x23_3d_axis1.cairo b/tests/nodes/gather_fp8x23_3d_axis1.cairo deleted file mode 100644 index c9c6dcf7f..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis1.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use 
orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_fp8x23_3d_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_fp8x23_3d_axis1/input_0.cairo b/tests/nodes/gather_fp8x23_3d_axis1/input_0.cairo deleted file mode 100644 index faabab14c..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis1/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: 
false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_axis1/input_1.cairo b/tests/nodes/gather_fp8x23_3d_axis1/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis1/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_axis1/output_0.cairo b/tests/nodes/gather_fp8x23_3d_axis1/output_0.cairo deleted file mode 100644 index a7fc4bc2f..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis1/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - 
shape.append(3); - shape.append(3); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - 
data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_axis2.cairo b/tests/nodes/gather_fp8x23_3d_axis2.cairo deleted file mode 100644 index 726411dd2..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis2.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_fp8x23_3d_axis2() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); - - 
assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_fp8x23_3d_axis2/input_0.cairo b/tests/nodes/gather_fp8x23_3d_axis2/input_0.cairo deleted file mode 100644 index faabab14c..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis2/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - 
data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_axis2/input_1.cairo b/tests/nodes/gather_fp8x23_3d_axis2/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis2/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_axis2/output_0.cairo b/tests/nodes/gather_fp8x23_3d_axis2/output_0.cairo deleted file mode 100644 index 7ba68407b..000000000 --- a/tests/nodes/gather_fp8x23_3d_axis2/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: 
false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { 
mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_default.cairo b/tests/nodes/gather_fp8x23_3d_default.cairo deleted file mode 100644 index e844827f9..000000000 --- a/tests/nodes/gather_fp8x23_3d_default.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_fp8x23_3d_default() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_fp8x23_3d_default/input_0.cairo b/tests/nodes/gather_fp8x23_3d_default/input_0.cairo deleted file mode 100644 index faabab14c..000000000 --- a/tests/nodes/gather_fp8x23_3d_default/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - 
shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_fp8x23_3d_default/output_0.cairo b/tests/nodes/gather_fp8x23_3d_default/output_0.cairo deleted file mode 100644 index 2d6dcdcf4..000000000 --- a/tests/nodes/gather_fp8x23_3d_default/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 
109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_axis1.cairo b/tests/nodes/gather_i32_3d_axis1.cairo deleted file mode 100644 index 6dbb78c47..000000000 --- a/tests/nodes/gather_i32_3d_axis1.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_i32_3d_axis1() { - let input_0 = 
input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_i32_3d_axis1/input_0.cairo b/tests/nodes/gather_i32_3d_axis1/input_0.cairo deleted file mode 100644 index 5562ac725..000000000 --- a/tests/nodes/gather_i32_3d_axis1/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_axis1/output_0.cairo b/tests/nodes/gather_i32_3d_axis1/output_0.cairo deleted file mode 100644 index ec7de3b93..000000000 --- a/tests/nodes/gather_i32_3d_axis1/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - 
data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(3); - data.append(4); - data.append(5); - data.append(0); - data.append(1); - data.append(2); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(12); - data.append(13); - data.append(14); - data.append(9); - data.append(10); - data.append(11); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(21); - data.append(22); - data.append(23); - data.append(18); - data.append(19); - data.append(20); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_axis2/input_0.cairo b/tests/nodes/gather_i32_3d_axis2/input_0.cairo deleted file mode 100644 index 5562ac725..000000000 --- a/tests/nodes/gather_i32_3d_axis2/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); 
- data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_axis2/input_1.cairo b/tests/nodes/gather_i32_3d_axis2/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_i32_3d_axis2/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_axis2/output_0.cairo b/tests/nodes/gather_i32_3d_axis2/output_0.cairo deleted file mode 100644 index 6f2069a71..000000000 --- a/tests/nodes/gather_i32_3d_axis2/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(4); - data.append(3); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(7); - data.append(6); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(10); - data.append(9); - data.append(11); - data.append(12); - data.append(13); - 
data.append(14); - data.append(13); - data.append(12); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(16); - data.append(15); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(19); - data.append(18); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(22); - data.append(21); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(25); - data.append(24); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_default/input_0.cairo b/tests/nodes/gather_i32_3d_default/input_0.cairo deleted file mode 100644 index 5562ac725..000000000 --- a/tests/nodes/gather_i32_3d_default/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_default/input_1.cairo b/tests/nodes/gather_i32_3d_default/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_i32_3d_default/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_default/output_0.cairo b/tests/nodes/gather_i32_3d_default/output_0.cairo deleted file mode 100644 index aae477d9f..000000000 --- a/tests/nodes/gather_i32_3d_default/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(18); - data.append(19); - data.append(20); - 
data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_axis1.cairo b/tests/nodes/gather_i8_3d_axis1.cairo deleted file mode 100644 index 140608123..000000000 --- a/tests/nodes/gather_i8_3d_axis1.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::operators::tensor::I8TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_i8_3d_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_i8_3d_axis1/input_0.cairo b/tests/nodes/gather_i8_3d_axis1/input_0.cairo deleted file mode 100644 index 3e59c35b2..000000000 --- a/tests/nodes/gather_i8_3d_axis1/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); 
- data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_axis1/input_1.cairo b/tests/nodes/gather_i8_3d_axis1/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_i8_3d_axis1/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_axis1/output_0.cairo b/tests/nodes/gather_i8_3d_axis1/output_0.cairo deleted file mode 100644 index 443ea5bb5..000000000 --- a/tests/nodes/gather_i8_3d_axis1/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(3); - data.append(4); - data.append(5); - data.append(0); - data.append(1); - data.append(2); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - 
data.append(16); - data.append(17); - data.append(12); - data.append(13); - data.append(14); - data.append(9); - data.append(10); - data.append(11); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(21); - data.append(22); - data.append(23); - data.append(18); - data.append(19); - data.append(20); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_axis2.cairo b/tests/nodes/gather_i8_3d_axis2.cairo deleted file mode 100644 index 992cee33e..000000000 --- a/tests/nodes/gather_i8_3d_axis2.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::operators::tensor::I8TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_i8_3d_axis2() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_i8_3d_axis2/input_0.cairo b/tests/nodes/gather_i8_3d_axis2/input_0.cairo deleted file mode 100644 index 3e59c35b2..000000000 --- a/tests/nodes/gather_i8_3d_axis2/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - 
shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_axis2/input_1.cairo b/tests/nodes/gather_i8_3d_axis2/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_i8_3d_axis2/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_axis2/output_0.cairo b/tests/nodes/gather_i8_3d_axis2/output_0.cairo deleted file mode 100644 index 81d7430f1..000000000 --- a/tests/nodes/gather_i8_3d_axis2/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - 
data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(4); - data.append(3); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(7); - data.append(6); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(10); - data.append(9); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(13); - data.append(12); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(16); - data.append(15); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(19); - data.append(18); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(22); - data.append(21); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(25); - data.append(24); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_default.cairo b/tests/nodes/gather_i8_3d_default.cairo deleted file mode 100644 index 0f8e6dec2..000000000 --- a/tests/nodes/gather_i8_3d_default.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::operators::tensor::I8TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_i8_3d_default() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); - - assert_eq(y_0, z_0); -} diff --git 
a/tests/nodes/gather_i8_3d_default/input_0.cairo b/tests/nodes/gather_i8_3d_default/input_0.cairo deleted file mode 100644 index 3e59c35b2..000000000 --- a/tests/nodes/gather_i8_3d_default/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_default/input_1.cairo b/tests/nodes/gather_i8_3d_default/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_i8_3d_default/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i8_3d_default/output_0.cairo b/tests/nodes/gather_i8_3d_default/output_0.cairo deleted file mode 
100644 index 6ec6b9b4a..000000000 --- a/tests/nodes/gather_i8_3d_default/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_i32_3d_default.cairo b/tests/nodes/gather_negative_axis.cairo similarity index 67% rename from tests/nodes/gather_i32_3d_default.cairo rename to tests/nodes/gather_negative_axis.cairo index 4c0b9c9bd..27c511614 100644 --- a/tests/nodes/gather_i32_3d_default.cairo +++ b/tests/nodes/gather_negative_axis.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; -use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_gather_i32_3d_default() { +fn test_gather_negative_axis() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); + let y_0 = input_0.gather(indices:input_1, axis:Option::Some(-1)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_negative_axis/input_0.cairo b/tests/nodes/gather_negative_axis/input_0.cairo new file mode 100644 index 000000000..5d8deafac --- /dev/null +++ b/tests/nodes/gather_negative_axis/input_0.cairo @@ -0,0 +1,41 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 
{ mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_fp8x23_3d_default/input_1.cairo b/tests/nodes/gather_negative_axis/input_1.cairo similarity index 83% rename from tests/nodes/gather_fp8x23_3d_default/input_1.cairo rename to tests/nodes/gather_negative_axis/input_1.cairo index b30e2f0c2..d5bbc25be 100644 --- a/tests/nodes/gather_fp8x23_3d_default/input_1.cairo +++ b/tests/nodes/gather_negative_axis/input_1.cairo @@ -1,9 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(2); diff --git a/tests/nodes/gather_negative_axis/output_0.cairo b/tests/nodes/gather_negative_axis/output_0.cairo new file mode 100644 index 000000000..147779c35 --- /dev/null +++ b/tests/nodes/gather_negative_axis/output_0.cairo 
@@ -0,0 +1,69 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 720896, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 917504, sign: false }); + 
data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1245184, sign: false }); + data.append(FP16x16 { mag: 1179648, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1441792, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_i32_3d_axis2.cairo b/tests/nodes/gather_negative_indices.cairo similarity index 67% rename from tests/nodes/gather_i32_3d_axis2.cairo rename to tests/nodes/gather_negative_indices.cairo index 29bd217b3..559a276ea 100644 --- a/tests/nodes/gather_i32_3d_axis2.cairo +++ b/tests/nodes/gather_negative_indices.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] -fn test_gather_i32_3d_axis2() { +fn test_gather_negative_indices() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); let z_0 = output_0::output_0(); - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); + let y_0 = input_0.gather(indices:input_1, axis:Option::Some(0)); assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_negative_indices/input_0.cairo b/tests/nodes/gather_negative_indices/input_0.cairo new file mode 100644 index 000000000..5f0a0a23c --- /dev/null +++ b/tests/nodes/gather_negative_indices/input_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(10); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_i32_3d_axis1/input_1.cairo b/tests/nodes/gather_negative_indices/input_1.cairo similarity index 59% rename from 
tests/nodes/gather_i32_3d_axis1/input_1.cairo rename to tests/nodes/gather_negative_indices/input_1.cairo index b30e2f0c2..b013cb6b7 100644 --- a/tests/nodes/gather_i32_3d_axis1/input_1.cairo +++ b/tests/nodes/gather_negative_indices/input_1.cairo @@ -1,19 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); - shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); + data.append(-9); + data.append(-10); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/gather_negative_indices/output_0.cairo b/tests/nodes/gather_negative_indices/output_0.cairo new file mode 100644 index 000000000..65efd394d --- /dev/null +++ b/tests/nodes/gather_negative_indices/output_0.cairo @@ -0,0 +1,15 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_u32_3d_axis1.cairo b/tests/nodes/gather_u32_3d_axis1.cairo deleted file mode 100644 index 1a7a56d37..000000000 --- a/tests/nodes/gather_u32_3d_axis1.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, 
U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_u32_3d_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(1)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_u32_3d_axis1/input_0.cairo b/tests/nodes/gather_u32_3d_axis1/input_0.cairo deleted file mode 100644 index d9508bf41..000000000 --- a/tests/nodes/gather_u32_3d_axis1/input_0.cairo +++ /dev/null @@ -1,50 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_axis1/input_1.cairo b/tests/nodes/gather_u32_3d_axis1/input_1.cairo deleted file mode 100644 index 99840a243..000000000 --- a/tests/nodes/gather_u32_3d_axis1/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ 
-use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(3); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_axis1/output_0.cairo b/tests/nodes/gather_u32_3d_axis1/output_0.cairo deleted file mode 100644 index 5ee2e2c5d..000000000 --- a/tests/nodes/gather_u32_3d_axis1/output_0.cairo +++ /dev/null @@ -1,69 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(3); - data.append(4); - data.append(5); - data.append(3); - data.append(4); - data.append(5); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(15); - data.append(16); - data.append(17); - data.append(15); - data.append(16); - data.append(17); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(27); - data.append(28); - 
data.append(29); - data.append(27); - data.append(28); - data.append(29); - data.append(33); - data.append(34); - data.append(35); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_axis2.cairo b/tests/nodes/gather_u32_3d_axis2.cairo deleted file mode 100644 index 30d5f6a61..000000000 --- a/tests/nodes/gather_u32_3d_axis2.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_u32_3d_axis2() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(2)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_u32_3d_axis2/input_0.cairo b/tests/nodes/gather_u32_3d_axis2/input_0.cairo deleted file mode 100644 index d9508bf41..000000000 --- a/tests/nodes/gather_u32_3d_axis2/input_0.cairo +++ /dev/null @@ -1,50 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - 
data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_axis2/input_1.cairo b/tests/nodes/gather_u32_3d_axis2/input_1.cairo deleted file mode 100644 index ad29ac0bc..000000000 --- a/tests/nodes/gather_u32_3d_axis2/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_axis2/output_0.cairo b/tests/nodes/gather_u32_3d_axis2/output_0.cairo deleted file mode 100644 index 39c4b0eb3..000000000 --- a/tests/nodes/gather_u32_3d_axis2/output_0.cairo +++ /dev/null @@ -1,87 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(4); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(4); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(7); - data.append(7); - data.append(8); - data.append(9); - 
data.append(10); - data.append(11); - data.append(10); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(13); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(16); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(19); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(22); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(25); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(28); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(31); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - data.append(34); - data.append(34); - data.append(35); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_default.cairo b/tests/nodes/gather_u32_3d_default.cairo deleted file mode 100644 index 8f223c4af..000000000 --- a/tests/nodes/gather_u32_3d_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_u32_3d_default() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z_0 = output_0::output_0(); - - let y_0 = input_0.gather(indices: input_1, axis: Option::Some(0)); - - assert_eq(y_0, z_0); -} diff --git a/tests/nodes/gather_u32_3d_default/input_0.cairo b/tests/nodes/gather_u32_3d_default/input_0.cairo deleted file mode 100644 
index d9508bf41..000000000 --- a/tests/nodes/gather_u32_3d_default/input_0.cairo +++ /dev/null @@ -1,50 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_default/input_1.cairo b/tests/nodes/gather_u32_3d_default/input_1.cairo deleted file mode 100644 index b30e2f0c2..000000000 --- a/tests/nodes/gather_u32_3d_default/input_1.cairo +++ /dev/null @@ -1,19 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_u32_3d_default/output_0.cairo 
b/tests/nodes/gather_u32_3d_default/output_0.cairo deleted file mode 100644 index b079689fd..000000000 --- a/tests/nodes/gather_u32_3d_default/output_0.cairo +++ /dev/null @@ -1,87 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::numbers::NumberTrait; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(2); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - TensorTrait::new(shape.span(), data.span()) -} From 
d89527d47f7bf4eba53411cc2180faea890dd829 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 17:02:54 +0100 Subject: [PATCH 35/68] update doc --- docs/framework/operators/tensor/tensor.gather.md | 8 ++++---- src/operators/tensor/core.cairo | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.gather.md b/docs/framework/operators/tensor/tensor.gather.md index 218b01385..e94f9c6c1 100644 --- a/docs/framework/operators/tensor/tensor.gather.md +++ b/docs/framework/operators/tensor/tensor.gather.md @@ -1,7 +1,7 @@ # tensor.gather ```rust - fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; ``` Gather entries of the axis dimension of data. @@ -9,8 +9,8 @@ Gather entries of the axis dimension of data. ## Args * `self`(`@Tensor`) - The input tensor. -* `indices`(`Tensor`) - Tensor of indices. -* `axis`(`Option`) - Axis to gather on. Default: axis=0. +* `indices`(`Tensor`) - Tensor of indices. +* `axis`(`Option`) - Axis to gather on. Default: axis=0. ## Panics @@ -32,7 +32,7 @@ fn gather_example() -> Tensor { shape: array![2, 3].span(), data: array![[ 1, 2, 3],[4, 5, 6]].span(), ); - let indices = TensorTrait::::new( + let indices = TensorTrait::::new( shape: array![1, 1].span(), data: array![1, 0].span(), ); diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index a5919cb85..e48796471 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -3166,7 +3166,7 @@ trait TensorTrait { /// # tensor.gather /// /// ```rust - /// fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; + /// fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; /// ``` /// /// Gather entries of the axis dimension of data. @@ -3174,8 +3174,8 @@ trait TensorTrait { /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. 
- /// * `indices`(`Tensor`) - Tensor of indices. - /// * `axis`(`Option`) - Axis to gather on. Default: axis=0. + /// * `indices`(`Tensor`) - Tensor of indices. + /// * `axis`(`Option`) - Axis to gather on. Default: axis=0. /// /// ## Panics /// @@ -3197,7 +3197,7 @@ trait TensorTrait { /// shape: array![2, 3].span(), /// data: array![[ 1, 2, 3],[4, 5, 6]].span(), /// ); - /// let indices = TensorTrait::::new( + /// let indices = TensorTrait::::new( /// shape: array![1, 1].span(), /// data: array![1, 0].span(), /// ); From f9b321690a94023ac6b32ca1e096edd690f163f5 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 17:11:36 +0100 Subject: [PATCH 36/68] refactor operator --- src/operators/tensor/core.cairo | 2 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../tensor/implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/less.cairo | 18 ++++++------------ 13 files changed, 18 insertions(+), 24 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index e48796471..eab961d21 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1309,7 +1309,7 @@ trait TensorTrait { /// >>> [0,0,0,0,0,0,0,1,1] /// ``` /// - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less_equal /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 82de59236..8d872300f 100644 --- 
a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -112,7 +112,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 43f0b8d3f..00336e44b 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -127,7 +127,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index b2a77e2f0..b934cc4f7 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -123,7 +123,7 @@ impl FP16x16Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 2beef19c8..b93913166 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -133,7 +133,7 @@ impl FP16x16WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git 
a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 52dfb8147..127caf29e 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -120,7 +120,7 @@ impl FP32x32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 700ef33d4..b61480023 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -120,7 +120,7 @@ impl FP64x64Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 3e6f3ccfa..c4bd1138b 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -120,7 +120,7 @@ impl FP8x23Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 1181ad3a1..92e1bc82c 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -123,7 +123,7 @@ impl FP8x23WTensor of TensorTrait { math::greater_equal::greater_equal(self, 
other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index c5c88a5d8..0e6e29904 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -121,7 +121,7 @@ impl I32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 78126051b..8c73bf0cd 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -118,7 +118,7 @@ impl I8Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 1df2de437..014dc8f30 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -117,7 +117,7 @@ impl U32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo index 35f9b4d73..240c92c57 100644 --- a/src/operators/tensor/math/less.cairo +++ b/src/operators/tensor/math/less.cairo @@ -1,20 +1,14 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use 
orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; /// Cf: TensorTrait::less docstring -fn less< - T, - impl UsizeFTensor: TensorTrait, - impl TPartialOrd: PartialOrd, - impl TCopy: Copy, - impl TDrop: Drop ->( +fn less, impl TCopy: Copy, impl TDrop: Drop>( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -26,9 +20,9 @@ fn less< let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); if *(*y.data)[indices_self] < *(*z.data)[indices_other] { - result.append(1); + result.append(true); } else { - result.append(0); + result.append(false); } n += 1; From fd79c375b2aaa02ce100cec58b247c2d96f44e7b Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 17:14:06 +0100 Subject: [PATCH 37/68] update tests --- nodegen/node/less.py | 20 +++---- tests/nodes.cairo | 20 +++---- tests/nodes/less_fp16x16.cairo | 14 ++--- tests/nodes/less_fp16x16/input_0.cairo | 26 ++++----- tests/nodes/less_fp16x16/input_1.cairo | 28 ++++----- tests/nodes/less_fp16x16/output_0.cairo | 58 +++++++++---------- tests/nodes/less_fp16x16_broadcast.cairo | 14 ++--- .../less_fp16x16_broadcast/input_0.cairo | 26 ++++----- .../less_fp16x16_broadcast/input_1.cairo | 6 +- .../less_fp16x16_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_fp8x23.cairo | 16 ++--- tests/nodes/less_fp8x23/input_0.cairo | 24 ++++---- tests/nodes/less_fp8x23/input_1.cairo | 30 +++++----- tests/nodes/less_fp8x23/output_0.cairo | 58 +++++++++---------- tests/nodes/less_fp8x23_broadcast.cairo | 16 ++--- .../nodes/less_fp8x23_broadcast/input_0.cairo | 26 ++++----- .../nodes/less_fp8x23_broadcast/input_1.cairo | 4 +- 
.../less_fp8x23_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i32.cairo | 14 ++--- tests/nodes/less_i32/input_0.cairo | 31 +++++----- tests/nodes/less_i32/input_1.cairo | 31 +++++----- tests/nodes/less_i32/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i32_broadcast.cairo | 14 ++--- tests/nodes/less_i32_broadcast/input_0.cairo | 27 ++++----- tests/nodes/less_i32_broadcast/input_1.cairo | 9 +-- tests/nodes/less_i32_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i8.cairo | 16 ++--- tests/nodes/less_i8/input_0.cairo | 31 +++++----- tests/nodes/less_i8/input_1.cairo | 25 ++++---- tests/nodes/less_i8/output_0.cairo | 58 +++++++++---------- tests/nodes/less_i8_broadcast.cairo | 16 ++--- tests/nodes/less_i8_broadcast/input_0.cairo | 33 ++++++----- tests/nodes/less_i8_broadcast/input_1.cairo | 5 +- tests/nodes/less_i8_broadcast/output_0.cairo | 58 +++++++++---------- tests/nodes/less_u32.cairo | 12 ++-- tests/nodes/less_u32/input_0.cairo | 31 +++++----- tests/nodes/less_u32/input_1.cairo | 29 +++++----- tests/nodes/less_u32/output_0.cairo | 58 +++++++++---------- tests/nodes/less_u32_broadcast.cairo | 12 ++-- tests/nodes/less_u32_broadcast/input_0.cairo | 29 +++++----- tests/nodes/less_u32_broadcast/input_1.cairo | 5 +- tests/nodes/less_u32_broadcast/output_0.cairo | 58 +++++++++---------- 42 files changed, 618 insertions(+), 602 deletions(-) diff --git a/nodegen/node/less.py b/nodegen/node/less.py index 20b39263d..14af93201 100644 --- a/nodegen/node/less.py +++ b/nodegen/node/less.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_u32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, 
z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_u32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i8" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_i8_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp8x23" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp8x23_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = 
Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp16x16" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.BOOL, z.shape, z.flatten()) name = "less_fp16x16_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 1f37081b1..7d1b9a0be 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -348,16 +348,6 @@ // mod xor_i8_broadcast; // mod xor_u32; // mod xor_u32_broadcast; -// mod less_fp16x16; -// mod less_fp16x16_broadcast; -// mod less_fp8x23; -// mod less_fp8x23_broadcast; -// mod less_i32; -// mod less_i32_broadcast; -// mod less_i8; -// mod less_i8_broadcast; -// mod less_u32; -// mod less_u32_broadcast; // mod greater_equal_fp16x16; // mod greater_equal_fp16x16_broadcast; // mod greater_equal_fp8x23; @@ -1037,3 +1027,13 @@ mod gather_fp16x16_3d_axis1; mod gather_fp16x16_3d_axis2; mod gather_negative_indices; mod gather_negative_axis; +mod less_fp16x16; +mod less_fp16x16_broadcast; +mod less_fp8x23; +mod less_fp8x23_broadcast; +mod less_i32; +mod less_i32_broadcast; +mod less_i8; +mod less_i8_broadcast; +mod less_u32; +mod less_u32_broadcast; diff --git a/tests/nodes/less_fp16x16.cairo b/tests/nodes/less_fp16x16.cairo index 04ac88b63..e81287d03 100644 --- a/tests/nodes/less_fp16x16.cairo +++ b/tests/nodes/less_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use core::array::{ArrayTrait, SpanTrait}; use 
orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp16x16/input_0.cairo b/tests/nodes/less_fp16x16/input_0.cairo index 41fa7524d..0921e77c9 100644 --- a/tests/nodes/less_fp16x16/input_0.cairo +++ b/tests/nodes/less_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + 
data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/input_1.cairo b/tests/nodes/less_fp16x16/input_1.cairo index fe0e56e41..2951ab380 100644 --- a/tests/nodes/less_fp16x16/input_1.cairo +++ b/tests/nodes/less_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { 
mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/output_0.cairo 
b/tests/nodes/less_fp16x16/output_0.cairo index ff7a8e63d..188202362 100644 --- a/tests/nodes/less_fp16x16/output_0.cairo +++ b/tests/nodes/less_fp16x16/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast.cairo b/tests/nodes/less_fp16x16_broadcast.cairo index 787c07448..20b4b839f 100644 --- a/tests/nodes/less_fp16x16_broadcast.cairo +++ b/tests/nodes/less_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_fp16x16_broadcast/input_0.cairo index 18782c0dd..6b2a71053 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 
65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_fp16x16_broadcast/input_1.cairo index 743355c3d..a5cfb8938 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use 
orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_fp16x16_broadcast/output_0.cairo index 7d4613a88..b02badbac 100644 --- a/tests/nodes/less_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + 
data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23.cairo b/tests/nodes/less_fp8x23.cairo index 6fe7b08b8..ff63fe9f2 100644 --- a/tests/nodes/less_fp8x23.cairo +++ b/tests/nodes/less_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp8x23/input_0.cairo b/tests/nodes/less_fp8x23/input_0.cairo index fbcd9f2a8..aade41632 100644 --- a/tests/nodes/less_fp8x23/input_0.cairo +++ b/tests/nodes/less_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 
+10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + 
data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/input_1.cairo b/tests/nodes/less_fp8x23/input_1.cairo index e27ba84da..94dfe4f24 100644 --- a/tests/nodes/less_fp8x23/input_1.cairo +++ b/tests/nodes/less_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - 
data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/output_0.cairo b/tests/nodes/less_fp8x23/output_0.cairo index 33906ca90..24e0b32fa 100644 --- a/tests/nodes/less_fp8x23/output_0.cairo +++ b/tests/nodes/less_fp8x23/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); + data.append(false); + 
data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast.cairo b/tests/nodes/less_fp8x23_broadcast.cairo index e8b3155c5..b1eae545f 100644 --- a/tests/nodes/less_fp8x23_broadcast.cairo +++ b/tests/nodes/less_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_fp8x23_broadcast/input_0.cairo index 88ad7277b..841487689 100644 --- 
a/tests/nodes/less_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - 
data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_fp8x23_broadcast/input_1.cairo index 29b68e7c3..20ee2633a 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -11,7 +11,7 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_fp8x23_broadcast/output_0.cairo index fbf242193..00ed53108 100644 --- a/tests/nodes/less_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = 
ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32.cairo b/tests/nodes/less_i32.cairo index 4a251b995..9e003aec4 100644 --- a/tests/nodes/less_i32.cairo +++ b/tests/nodes/less_i32.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] 
#[available_gas(2000000000)] fn test_less_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i32/input_0.cairo b/tests/nodes/less_i32/input_0.cairo index ab59d73f2..cbb18e299 100644 --- a/tests/nodes/less_i32/input_0.cairo +++ b/tests/nodes/less_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-1); - data.append(-2); - data.append(2); - data.append(2); - data.append(2); + data.append(-3); data.append(-2); + data.append(0); data.append(1); data.append(1); data.append(1); data.append(0); + data.append(-1); + data.append(0); + data.append(-1); + data.append(1); data.append(-2); - data.append(-3); + data.append(0); + data.append(-2); + data.append(0); data.append(-1); data.append(-1); data.append(-2); data.append(2); data.append(-2); data.append(-1); - data.append(-3); - data.append(0); - data.append(0); - data.append(-3); - data.append(-3); + data.append(1); data.append(-2); - data.append(2); - data.append(-3); - data.append(0); + data.append(1); + data.append(-2); + data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/input_1.cairo b/tests/nodes/less_i32/input_1.cairo index cadacc785..bff10843b 100644 --- a/tests/nodes/less_i32/input_1.cairo +++ b/tests/nodes/less_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(-1); - data.append(0); - data.append(-2); + data.append(1); data.append(2); + data.append(0); data.append(1); - data.append(-1); - data.append(-3); - data.append(-3); data.append(-2); data.append(-2); data.append(2); - data.append(-1); - data.append(-3); data.append(2); - data.append(1); - data.append(-2); + data.append(2); data.append(-1); - data.append(-2); + data.append(0); data.append(1); - data.append(-3); + data.append(2); data.append(-1); data.append(0); - data.append(1); data.append(0); + data.append(1); data.append(2); + data.append(-2); + data.append(-3); + data.append(-1); + data.append(-3); data.append(0); + data.append(-2); + data.append(-3); + data.append(2); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/output_0.cairo b/tests/nodes/less_i32/output_0.cairo index bff093e8b..fa40143fd 100644 --- a/tests/nodes/less_i32/output_0.cairo +++ b/tests/nodes/less_i32/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - 
data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast.cairo b/tests/nodes/less_i32_broadcast.cairo index 552150976..56bbe7722 100644 --- a/tests/nodes/less_i32_broadcast.cairo +++ b/tests/nodes/less_i32_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] fn test_less_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i32_broadcast/input_0.cairo 
b/tests/nodes/less_i32_broadcast/input_0.cairo index e49eaf809..fe220acc6 100644 --- a/tests/nodes/less_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -10,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); - data.append(-2); data.append(1); - data.append(-3); data.append(2); - data.append(-1); - data.append(-3); + data.append(-2); data.append(0); - data.append(2); + data.append(-2); data.append(1); - data.append(-1); data.append(0); data.append(1); - data.append(1); data.append(0); - data.append(1); - data.append(-3); data.append(2); + data.append(0); + data.append(-1); + data.append(-1); + data.append(-2); data.append(1); + data.append(-2); + data.append(2); + data.append(2); data.append(2); data.append(0); - data.append(0); + data.append(2); + data.append(2); data.append(0); data.append(-3); data.append(0); - data.append(2); - data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/input_1.cairo b/tests/nodes/less_i32_broadcast/input_1.cairo index 99c50d44f..b6b877733 100644 --- a/tests/nodes/less_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(-3); - 
data.append(0); - data.append(1); + data.append(-1); + data.append(-2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/output_0.cairo b/tests/nodes/less_i32_broadcast/output_0.cairo index 79967537c..31e21d966 100644 --- a/tests/nodes/less_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_i32_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8.cairo b/tests/nodes/less_i8.cairo index 085a6da35..0ccc89ef6 100644 
--- a/tests/nodes/less_i8.cairo +++ b/tests/nodes/less_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i8/input_0.cairo b/tests/nodes/less_i8/input_0.cairo index 28dd5a905..bf97a41b0 100644 --- a/tests/nodes/less_i8/input_0.cairo +++ b/tests/nodes/less_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -13,28 +14,28 @@ fn input_0() -> Tensor { data.append(0); data.append(-3); data.append(-2); - data.append(-1); data.append(2); - data.append(-1); - data.append(-3); - data.append(-1); - data.append(-3); - data.append(-3); - data.append(-1); - data.append(-1); + data.append(2); + data.append(2); + data.append(2); data.append(-2); + data.append(0); + data.append(-2); + data.append(2); + data.append(2); + data.append(-2); + data.append(-2); + 
data.append(0); data.append(-2); - data.append(-3); - data.append(-1); - data.append(1); data.append(-2); data.append(0); + data.append(1); data.append(-3); data.append(2); - data.append(-3); - data.append(-2); data.append(2); data.append(0); - data.append(0); + data.append(1); + data.append(-2); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/input_1.cairo b/tests/nodes/less_i8/input_1.cairo index 9dc4e7a6c..cf5a6620b 100644 --- a/tests/nodes/less_i8/input_1.cairo +++ b/tests/nodes/less_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); + data.append(-1); data.append(1); - data.append(-2); data.append(-3); - data.append(2); data.append(-1); data.append(0); data.append(-1); data.append(-2); data.append(1); - data.append(0); - data.append(2); - data.append(-2); data.append(1); - data.append(-2); - data.append(-3); data.append(2); - data.append(0); data.append(-1); - data.append(0); - data.append(0); data.append(-2); data.append(2); + data.append(-2); + data.append(1); + data.append(1); + data.append(2); data.append(-1); + data.append(-3); data.append(2); data.append(0); + data.append(0); + data.append(-3); + data.append(-1); data.append(-3); + data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/output_0.cairo b/tests/nodes/less_i8/output_0.cairo index 9398cc8d3..2795730dc 100644 --- a/tests/nodes/less_i8/output_0.cairo +++ b/tests/nodes/less_i8/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, 
Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast.cairo b/tests/nodes/less_i8_broadcast.cairo index fb705a81d..f56369ba4 100644 --- a/tests/nodes/less_i8_broadcast.cairo +++ b/tests/nodes/less_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::operators::tensor::BoolTensorPartialEq; +use 
orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_i8_broadcast/input_0.cairo b/tests/nodes/less_i8_broadcast/input_0.cairo index eac6c02fd..da5b4c091 100644 --- a/tests/nodes/less_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-2); - data.append(1); + data.append(2); + data.append(2); data.append(1); - data.append(0); - data.append(0); - data.append(-1); - data.append(-3); - data.append(0); data.append(2); - data.append(-3); data.append(1); data.append(1); data.append(2); - data.append(-3); data.append(2); + data.append(0); + data.append(-3); + data.append(-2); + data.append(-2); + data.append(1); + data.append(1); data.append(-3); - data.append(2); data.append(1); - data.append(0); - data.append(-1); - data.append(0); - data.append(-1); data.append(1); data.append(0); + data.append(-1); + data.append(2); data.append(-2); + data.append(0); + data.append(-3); data.append(2); + data.append(-2); + data.append(0); + 
data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/input_1.cairo b/tests/nodes/less_i8_broadcast/input_1.cairo index d593d06e4..01a2cdb2d 100644 --- a/tests/nodes/less_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(-3); data.append(-1); - data.append(-2); data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/output_0.cairo b/tests/nodes/less_i8_broadcast/output_0.cairo index f68e15ea8..fd762de3a 100644 --- a/tests/nodes/less_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_i8_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(false); + 
data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32.cairo b/tests/nodes/less_u32.cairo index 412895527..78019ea42 100644 --- a/tests/nodes/less_u32.cairo +++ b/tests/nodes/less_u32.cairo @@ -3,10 +3,12 @@ mod input_1; mod output_0; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::U32TensorPartialEq; #[test] @@ -14,9 +16,9 @@ use orion::operators::tensor::U32TensorPartialEq; fn test_less_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_u32/input_0.cairo b/tests/nodes/less_u32/input_0.cairo index 15b2924f3..bc06571cc 100644 --- a/tests/nodes/less_u32/input_0.cairo +++ b/tests/nodes/less_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use 
orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(4); + data.append(1); data.append(0); - data.append(2); data.append(0); - data.append(4); - data.append(5); + data.append(1); data.append(0); + data.append(1); data.append(4); + data.append(2); data.append(4); + data.append(5); + data.append(0); data.append(1); data.append(5); + data.append(1); + data.append(5); + data.append(4); data.append(3); + data.append(0); data.append(3); - data.append(2); - data.append(5); - data.append(5); data.append(4); - data.append(1); - data.append(1); data.append(4); + data.append(5); + data.append(5); data.append(4); + data.append(0); data.append(2); - data.append(2); - data.append(2); - data.append(4); - data.append(4); - data.append(3); - data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/input_1.cairo b/tests/nodes/less_u32/input_1.cairo index e540f7f40..df1accf66 100644 --- a/tests/nodes/less_u32/input_1.cairo +++ b/tests/nodes/less_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); + data.append(3); + data.append(4); + data.append(1); + data.append(3); data.append(1); data.append(5); + data.append(1); data.append(5); + data.append(3); + data.append(0); data.append(2); + data.append(3); + data.append(4); data.append(1); + data.append(0); + data.append(4); + data.append(5); data.append(1); data.append(2); - data.append(1); - 
data.append(3); data.append(3); - data.append(0); data.append(5); - data.append(2); data.append(0); - data.append(0); - data.append(0); - data.append(4); - data.append(1); data.append(1); data.append(5); + data.append(1); data.append(4); data.append(4); - data.append(2); - data.append(3); - data.append(2); - data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/output_0.cairo b/tests/nodes/less_u32/output_0.cairo index 75a278131..9c6097ec0 100644 --- a/tests/nodes/less_u32/output_0.cairo +++ b/tests/nodes/less_u32/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(false); + data.append(true); + data.append(true); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(true); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + 
data.append(true); + data.append(true); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast.cairo b/tests/nodes/less_u32_broadcast.cairo index 9a7ac7a22..28106718a 100644 --- a/tests/nodes/less_u32_broadcast.cairo +++ b/tests/nodes/less_u32_broadcast.cairo @@ -3,10 +3,12 @@ mod input_1; mod output_0; +use orion::operators::tensor::BoolTensorPartialEq; +use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::U32TensorPartialEq; #[test] @@ -14,9 +16,9 @@ use orion::operators::tensor::U32TensorPartialEq; fn test_less_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less(@input_1); + let y_0 = input_0.less(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_u32_broadcast/input_0.cairo b/tests/nodes/less_u32_broadcast/input_0.cairo index 655814fc8..dd6ff0d5a 100644 --- a/tests/nodes/less_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(1); data.append(5); - data.append(5); - data.append(3); - data.append(4); data.append(4); - data.append(5); data.append(3); + data.append(2); data.append(0); - data.append(5); 
data.append(3); + data.append(2); + data.append(5); + data.append(1); data.append(3); - data.append(0); data.append(4); - data.append(1); - data.append(5); - data.append(0); - data.append(2); data.append(2); + data.append(5); data.append(1); + data.append(0); data.append(3); - data.append(5); - data.append(5); + data.append(3); + data.append(4); + data.append(2); data.append(5); data.append(4); data.append(5); data.append(5); - data.append(2); + data.append(5); + data.append(1); + data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/input_1.cairo b/tests/nodes/less_u32_broadcast/input_1.cairo index bcb20d101..ece89984a 100644 --- a/tests/nodes/less_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -11,6 +12,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(3); data.append(0); - data.append(0); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/output_0.cairo b/tests/nodes/less_u32_broadcast/output_0.cairo index ad7acc0af..1717e77f9 100644 --- a/tests/nodes/less_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_u32_broadcast/output_0.cairo @@ -1,40 +1,40 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::BoolTensor; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - 
data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(false); + data.append(true); + data.append(false); TensorTrait::new(shape.span(), data.span()) } From 1ea8222f71a8b98fae13e8112c79722cfb828a26 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 17:15:59 +0100 Subject: [PATCH 38/68] update doc --- docs/framework/operators/tensor/tensor.less.md | 8 ++++---- src/operators/tensor/core.cairo | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md index d5d264d8a..65f44ba41 100644 --- a/docs/framework/operators/tensor/tensor.less.md +++ b/docs/framework/operators/tensor/tensor.less.md @@ -1,7 +1,7 @@ #tensor.less ```rust - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than the corresponding element of the second tensor. 
@@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans with the same shape as the broadcasted inputs. ## Examples @@ -43,7 +43,7 @@ fn less_example() -> Tensor { // We can call `less` function as follows. return tensor_1.less(@tensor_2); } ->>> [0,0,0,0,0,0,1,0,0] +>>> [false,false,false,false,false,false,true,false,false] ``` Case 2: Compare tensors with different shapes @@ -63,5 +63,5 @@ fn less_example() -> Tensor { // We can call `less` function as follows. return tensor_1.less(@tensor_2); } ->>> [0,0,0,0,0,0,0,1,1] +>>> [false,false,false,false,false,false,false,true,true] ``` diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index eab961d21..c0bcf5e7c 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1244,7 +1244,7 @@ trait TensorTrait { /// #tensor.less /// /// ```rust - /// fn less(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than the corresponding element of the second tensor. @@ -1263,7 +1263,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans with the same shape as the broadcasted inputs. /// /// ## Examples /// @@ -1286,7 +1286,7 @@ trait TensorTrait { /// // We can call `less` function as follows. /// return tensor_1.less(@tensor_2); /// } - /// >>> [0,0,0,0,0,0,1,0,0] + /// >>> [false,false,false,false,false,false,true,false,false] /// ``` /// /// Case 2: Compare tensors with different shapes @@ -1306,7 +1306,7 @@ trait TensorTrait { /// // We can call `less` function as follows. 
/// return tensor_1.less(@tensor_2); /// } - /// >>> [0,0,0,0,0,0,0,1,1] + /// >>> [false,false,false,false,false,false,false,true,true] /// ``` /// fn less(self: @Tensor, other: @Tensor) -> Tensor; From 1cdeeafb80c526caa9ded0be12c43fc97dae3cd5 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 17:59:42 +0100 Subject: [PATCH 39/68] refactor operator --- src/operators/nn/functional/col2im.cairo | 66 ++--- .../nn/functional/conv_transpose.cairo | 236 ++++++++++-------- .../nn/functional/depth_to_space.cairo | 33 ++- .../nn/functional/space_to_depth.cairo | 31 ++- src/operators/tensor/core.cairo | 81 +++++- .../tensor/implementations/tensor_bool.cairo | 4 +- .../implementations/tensor_complex64.cairo | 4 +- .../implementations/tensor_fp16x16.cairo | 4 +- .../implementations/tensor_fp16x16wide.cairo | 4 +- .../implementations/tensor_fp32x32.cairo | 4 +- .../implementations/tensor_fp64x64.cairo | 4 +- .../implementations/tensor_fp8x23.cairo | 4 +- .../implementations/tensor_fp8x23wide.cairo | 4 +- .../tensor/implementations/tensor_i32.cairo | 4 +- .../tensor/implementations/tensor_i8.cairo | 4 +- .../tensor/implementations/tensor_u32.cairo | 4 +- .../manipulation/split_to_sequence.cairo | 172 +++++++------ src/operators/tensor/math/flatten.cairo | 9 +- .../tensor/math/layer_normalization.cairo | 18 +- 19 files changed, 427 insertions(+), 263 deletions(-) diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index 4f9cfc1a8..b08d9f650 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -56,43 +56,53 @@ fn col2im, +NumberTrait, +Copy, +Drop, +Ad let bl = prod(block_shape, 0); let C = *(*data).shape.at(1) / bl; - let mut new_shape = array![*(*data).shape.at(0), C, bl]; + let mut new_shape: Array = array![ + (*(*data).shape.at(0)).try_into().unwrap(), C.try_into().unwrap(), bl.try_into().unwrap() + ]; let mut i = 2; - while i != (*data).shape.len() { - 
new_shape.append(*(*data).shape.at(i)); - i += 1; - }; + while i != (*data) + .shape + .len() { + new_shape.append((*(*data).shape.at(i)).try_into().unwrap()); + i += 1; + }; - let data = data.reshape(new_shape.span()); + let data = data.reshape(new_shape.span(), false); let mut res: Array = array![]; let data_stride = stride(data.shape); let mut n = 0; - while n != *data.shape.at(0) { - let mut c = 0; - while c != *data.shape.at(1) { - let data_n_c = TensorTrait::new( - SpanTrait::slice(data.shape, 2, data.shape.len() - 2), - SpanTrait::slice( - data.data, n * *data_stride.at(0) + c * *data_stride.at(1), *data_stride.at(1) - ) - ); - let mut out = col2im_naive_implementation( - @data_n_c, image_shape, block_shape, dilations, pads, strides - ); - let mut i = 0; - while i != out.len() { - res.append(out.at(i)); - i += 1; - }; + while n != *data + .shape + .at(0) { + let mut c = 0; + while c != *data + .shape + .at(1) { + let data_n_c = TensorTrait::new( + SpanTrait::slice(data.shape, 2, data.shape.len() - 2), + SpanTrait::slice( + data.data, + n * *data_stride.at(0) + c * *data_stride.at(1), + *data_stride.at(1) + ) + ); + let mut out = col2im_naive_implementation( + @data_n_c, image_shape, block_shape, dilations, pads, strides + ); + let mut i = 0; + while i != out.len() { + res.append(out.at(i)); + i += 1; + }; + + c += 1; + }; - c += 1; + n += 1; }; - n += 1; - }; - let mut new_shape = array![*data.shape.at(0), *data.shape.at(1)]; let mut i = 0; while i != image_shape.len() { @@ -289,4 +299,4 @@ fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul< }; prod -} +} \ No newline at end of file diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index f8f810558..111646a27 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -61,11 +61,13 @@ fn conv_transpose< Option::None => { let mut output_padding: Array = array![]; let mut i = 2; - while i != 
(*X).shape.len() { - output_padding.append(0); - output_padding.append(0); - i += 1; - }; + while i != (*X) + .shape + .len() { + output_padding.append(0); + output_padding.append(0); + i += 1; + }; output_padding.span() }, @@ -151,10 +153,11 @@ fn conv_transpose< Option::None => { let mut output_shape: Array = array![]; let mut i = 0; - while i != strides.len() { - output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); - i += 1; - }; + while i != strides + .len() { + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; output_shape.span() }, @@ -162,16 +165,17 @@ fn conv_transpose< let mut total_padding: Array = array![]; let mut i = 0; - while i != output_shape.len() { - total_padding - .append( - (*(*X).shape.at(i + 2) - 1) * *strides.at(i) - + *output_padding.at(i) - + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - - *output_shape.at(i) - ); - i += 1; - }; + while i != output_shape + .len() { + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; let total_padding = total_padding.span(); @@ -184,10 +188,11 @@ fn conv_transpose< }; let mut i = 0; - while i != output_shape.len() { - pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); - i += 1; - }; + while i != output_shape + .len() { + pads.append(*total_padding.at(i) - (*total_padding.at(i) / 2)); + i += 1; + }; (pads.span(), pads.len() / 2, output_shape) }, @@ -197,10 +202,11 @@ fn conv_transpose< Option::None => { let mut output_shape: Array = array![]; let mut i = 0; - while i != strides.len() { - output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); - i += 1; - }; + while i != strides + .len() { + output_shape.append(*(*X).shape.at(i + 2) * *strides.at(i)); + i += 1; + }; output_shape.span() }, @@ -208,26 +214,28 @@ fn conv_transpose< let mut total_padding: Array = array![]; let mut i = 0; - while i != 
output_shape.len() { - total_padding - .append( - (*(*X).shape.at(i + 2) - 1) * *strides.at(i) - + *output_padding.at(i) - + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) - - *output_shape.at(i) - ); - i += 1; - }; + while i != output_shape + .len() { + total_padding + .append( + (*(*X).shape.at(i + 2) - 1) * *strides.at(i) + + *output_padding.at(i) + + ((*kernel_shape.at(i) - 1) * *dilations.at(i) + 1) + - *output_shape.at(i) + ); + i += 1; + }; let total_padding = total_padding.span(); let mut pads: Array = array![]; let mut i = 0; - while i != output_shape.len() { - pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); - i += 1; - }; + while i != output_shape + .len() { + pads.append(*total_padding.at(i) - *total_padding.at(i) / 2); + i += 1; + }; let mut i = 0; while i != output_shape.len() { @@ -302,50 +310,63 @@ fn conv_transpose< if group == 1 { let mut image_id = 0; - while image_id != *(*X).shape.at(0) { - let w_t = TensorTrait::new(array![k, m].span(), (*W).data) - .transpose(array![1, 0].span()); - - let image = SpanTrait::slice((*X).data, image_id * k * n, k * n); - let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); - - let gemmc = gemm - .reshape(array![num_output_channels, m / num_output_channels, n].span()); - let mut c = 0; - while c != num_output_channels { - let gemmc_c = TensorTrait::new( - array![m / num_output_channels, n].span(), - SpanTrait::slice( - gemmc.data, (m / num_output_channels) * n * c, (m / num_output_channels) * n - ) - ); - - let mut res = col2im_naive_implementation( - @gemmc_c, output_shape, kernel_shape, dilations, pads, strides - ); - - match B { - Option::Some(B) => { - let mut i = 0; - while i != res.len() { - res.set(i, res.at(i) + *(*B).data.at(c)); - i += 1; - }; - }, - Option::None => {}, - } + while image_id != *(*X) + .shape + .at(0) { + let w_t = TensorTrait::new(array![k, m].span(), (*W).data) + .transpose(array![1, 0].span()); + + let image = SpanTrait::slice((*X).data, image_id * k 
* n, k * n); + let gemm = w_t.matmul(@TensorTrait::new(array![k, n].span(), image)); + + let gemmc = gemm + .reshape( + array![ + num_output_channels.try_into().unwrap(), + (m / num_output_channels).try_into().unwrap(), + n.try_into().unwrap() + ] + .span(), + false + ); + let mut c = 0; + while c != num_output_channels { + let gemmc_c = TensorTrait::new( + array![m / num_output_channels, n].span(), + SpanTrait::slice( + gemmc.data, + (m / num_output_channels) * n * c, + (m / num_output_channels) * n + ) + ); + + let mut res = col2im_naive_implementation( + @gemmc_c, output_shape, kernel_shape, dilations, pads, strides + ); + + match B { + Option::Some(B) => { + let mut i = 0; + while i != res + .len() { + res.set(i, res.at(i) + *(*B).data.at(c)); + i += 1; + }; + }, + Option::None => {}, + } - c += 1; + c += 1; - let mut i = 0; - while i != res.len() { - final.append(res.at(i)); - i += 1; + let mut i = 0; + while i != res.len() { + final.append(res.at(i)); + i += 1; + }; }; - }; - image_id += 1; - }; + image_id += 1; + }; } else { let mut output_array: Array> = array![]; @@ -363,19 +384,21 @@ fn conv_transpose< let mut group_W: Array = array![]; let mut image_id = 0; - while image_id != *(*X).shape.at(0) { - let start = image_id * n * C + (group_id * C / group) * n; - let end = image_id * n * C + ((group_id + 1) * C / group) * n; + while image_id != *(*X) + .shape + .at(0) { + let start = image_id * n * C + (group_id * C / group) * n; + let end = image_id * n * C + ((group_id + 1) * C / group) * n; + + let mut i = start; + while i != end { + group_X.append(*(*X).data.at(i)); + i += 1; + }; - let mut i = start; - while i != end { - group_X.append(*(*X).data.at(i)); - i += 1; + image_id += 1; }; - image_id += 1; - }; - let start = (group_id * C / group) * *(*W).shape.at(1) * kernel_size; let end = (group_id + 1) * C / group * *(*W).shape.at(1) * kernel_size; let mut i = start; @@ -433,22 +456,26 @@ fn conv_transpose< // Sorting result per item of the batch // output 
size : N (batch size) x num_output_channels x output_shape let mut image_id = 0; - while image_id != *(*X).shape.at(0) { - let mut group_id = 0; - while group_id != group { - let group_output = *output_array.at(group_id); - let mut i = image_id * output_size * (num_output_channels / group); - - while i != (image_id + 1) * output_size * (num_output_channels / group) { - final.append(*group_output.at(i)); - i += 1; + while image_id != *(*X) + .shape + .at(0) { + let mut group_id = 0; + while group_id != group { + let group_output = *output_array.at(group_id); + let mut i = image_id * output_size * (num_output_channels / group); + + while i != (image_id + 1) + * output_size + * (num_output_channels / group) { + final.append(*group_output.at(i)); + i += 1; + }; + + group_id += 1; }; - group_id += 1; + image_id += 1; }; - - image_id += 1; - }; } let mut shape = array![*(*X).shape.at(0), num_output_channels]; @@ -650,4 +677,3 @@ fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul< prod } - diff --git a/src/operators/nn/functional/depth_to_space.cairo b/src/operators/nn/functional/depth_to_space.cairo index 161ea46ad..c34bd1439 100644 --- a/src/operators/nn/functional/depth_to_space.cairo +++ b/src/operators/nn/functional/depth_to_space.cairo @@ -20,24 +20,35 @@ fn depth_to_space< ) -> Tensor { assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); - let b = (tensor.shape).at(0); - let C = (tensor.shape).at(1); - let H = (tensor.shape).at(2); - let W = (tensor.shape).at(3); - let finalshape = array![*b, *C / (blocksize * blocksize), *H * blocksize, *W * blocksize]; + let blocksize_i32: i32 = blocksize.try_into().unwrap(); + + let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap(); + let C: u32 = (*(tensor.shape).at(1)).try_into().unwrap(); + let H: i32 = (*(tensor.shape).at(2)).try_into().unwrap(); + let W: i32 = (*(tensor.shape).at(3)).try_into().unwrap(); + let finalshape: Array = array![ + b, + (C / (blocksize * blocksize)).try_into().unwrap(), + (H * blocksize_i32), 
+ (W * blocksize_i32) + ]; if mode == 'DCR' { - let tmpshape = array![*b, blocksize, blocksize, *C / (blocksize * blocksize), *H, *W]; - let reshaped = (tensor).reshape(target_shape: tmpshape.span()); + let tmpshape: Array = array![ + b, blocksize_i32, blocksize_i32, (C / (blocksize * blocksize)).try_into().unwrap(), H, W + ]; + let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false); let transposed = reshaped.transpose(axes: array![0, 3, 4, 1, 5, 2].span()); - transposed.reshape(target_shape: finalshape.span()) + transposed.reshape(target_shape: finalshape.span(), allowzero: false) } else { // assert mode == "CRD" - let tmpshape = array![*b, *C / (blocksize * blocksize), blocksize, blocksize, *H, *W]; - let reshaped = (tensor).reshape(target_shape: tmpshape.span()); + let tmpshape: Array = array![ + b, (C / (blocksize * blocksize)).try_into().unwrap(), blocksize_i32, blocksize_i32, H, W + ]; + let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false); let transposed = reshaped.transpose(axes: array![0, 1, 4, 2, 5, 3].span()); - transposed.reshape(target_shape: finalshape.span()) + transposed.reshape(target_shape: finalshape.span(), allowzero: false) } } diff --git a/src/operators/nn/functional/space_to_depth.cairo b/src/operators/nn/functional/space_to_depth.cairo index d8e8089cb..bfaf61774 100644 --- a/src/operators/nn/functional/space_to_depth.cairo +++ b/src/operators/nn/functional/space_to_depth.cairo @@ -1,3 +1,4 @@ +use core::option::OptionTrait; use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait}; @@ -20,14 +21,28 @@ fn space_to_depth< ) -> Tensor { assert((tensor.shape).len() == 4, 'Unexpected shape 4.'); - let b = (tensor.shape).at(0); - let C = (tensor.shape).at(1); - let H = (tensor.shape).at(2); - let W = (tensor.shape).at(3); - let tmpshape = array![*b, *C, *H / blocksize, blocksize, *W / blocksize, blocksize]; - 
let reshaped = (tensor).reshape(target_shape: tmpshape.span()); + let blocksize_i32: i32 = blocksize.try_into().unwrap(); + + let b: i32 = (*(tensor.shape).at(0)).try_into().unwrap(); + let C: i32 = (*(tensor.shape).at(1)).try_into().unwrap(); + let H: u32 = (*(tensor.shape).at(2)); + let W: u32 = (*(tensor.shape).at(3)); + let tmpshape = array![ + b, + C, + (H / blocksize).try_into().unwrap(), + blocksize_i32, + (W / blocksize).try_into().unwrap(), + blocksize_i32 + ]; + let reshaped = (tensor).reshape(target_shape: tmpshape.span(), allowzero: false); let transposed = reshaped.transpose(axes: array![0, 3, 5, 1, 2, 4].span()); - let finalshape = array![*b, *C * blocksize * blocksize, *H / blocksize, *W / blocksize]; + let finalshape = array![ + b, + C * blocksize_i32 * blocksize_i32, + (H / blocksize).try_into().unwrap(), + (W / blocksize).try_into().unwrap() + ]; - transposed.reshape(target_shape: finalshape.span()) + transposed.reshape(target_shape: finalshape.span(), allowzero: false) } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index c0bcf5e7c..8e2178c79 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -594,7 +594,7 @@ trait TensorTrait { /// >>> [[0,1,2,3], [4,5,6,7]] /// ``` /// - fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor; /// # tensor.transpose /// /// ```rust @@ -5945,8 +5945,83 @@ fn stride(mut shape: Span) -> Span { /// Cf: TensorTrait::reshape docstring -fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - new_tensor(target_shape, *self.data) +fn reshape>>( + self: @Tensor, target_shape: Span, allowzero: bool +) -> Tensor { + // Calculate the total number of elements in the original tensor + let mut total_elements = 1; + let mut shape = *self.shape; + loop { + match shape.pop_front() { + Option::Some(val) => total_elements *= *val, + Option::None => { break; } + }; + }; + + // Calculate 
'elements_so_far' and find 'inferred_index' + let mut elements_so_far = 1; + let mut inferred_index = Option::None; + let mut target_shape_clone = target_shape.clone(); + let mut i: usize = 0; + loop { + match target_shape_clone.pop_front() { + Option::Some(dim) => { + if *dim == -1 { + if inferred_index.is_none() { + inferred_index = Option::Some(i); + } else { + panic!("Only one dimension can be inferred"); + } + } else if *dim == 0 && allowzero == false { + // When allowzero is not set, copy the dimension size from the original tensor + if i >= (*self.shape).len() { + panic!("Dimension out of bounds for using original dimension value"); + } + elements_so_far *= *(*self).shape.at(i); + } else if *dim >= 0 { + elements_so_far *= (*dim).try_into().unwrap(); + } else { + panic!("Invalid dimension size"); + }; + }, + Option::None => { break; } + }; + i += 1; + }; + + let mut target_shape_clone = target_shape.clone(); + let mut inferred_shape = ArrayTrait::::new(); + i = 0; // Reset the index for the next loop + loop { + match target_shape_clone.pop_front() { + Option::Some(dim) => { + if *dim == -1 { + inferred_shape.append(total_elements / elements_so_far) // Inferred dimension + } else if *dim == 0 { + if allowzero == true { + inferred_shape + .append( + 0 + ) // Explicitly set the dimension to zero when allowzero is enabled + } else if i < (*self.shape).len() { + inferred_shape + .append( + *(*self).shape.at(i) + ) // Dimension unchanged from original when allowzero is not enabled + } else { + panic!("Dimension out of bounds for using original dimension value"); + } + } else { + inferred_shape + .append((*dim).try_into().unwrap()) // Directly specified dimension + }; + }, + Option::None => { break; } + } + i += 1; + }; + + new_tensor(inferred_shape.span(), *self.data) } /// Cf: TensorTrait::at docstring diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 8d872300f..be929b5b2 100644 --- 
a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -60,8 +60,8 @@ impl BoolTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 00336e44b..a635bb84f 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -69,8 +69,8 @@ impl Complex64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index b934cc4f7..846b9d73f 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -71,8 +71,8 @@ impl FP16x16Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index b93913166..ed87491c3 100644 --- 
a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -75,8 +75,8 @@ impl FP16x16WTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 127caf29e..f215f10f4 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -68,8 +68,8 @@ impl FP32x32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index b61480023..9b8811486 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -68,8 +68,8 @@ impl FP64x64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index c4bd1138b..6e2e931e2 100644 --- 
a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -68,8 +68,8 @@ impl FP8x23Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 92e1bc82c..dcc27247b 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -71,8 +71,8 @@ impl FP8x23WTensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 0e6e29904..d0c779e7b 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -68,8 +68,8 @@ impl I32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 8c73bf0cd..e05b05d81 100644 --- 
a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -66,8 +66,8 @@ impl I8Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 014dc8f30..0723f9cc8 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -65,8 +65,8 @@ impl U32Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span) -> Tensor { - reshape(self, target_shape) + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + reshape(self, target_shape, allowzero) } fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/manipulation/split_to_sequence.cairo b/src/operators/tensor/manipulation/split_to_sequence.cairo index 46dbe1af7..2e8e4704c 100644 --- a/src/operators/tensor/manipulation/split_to_sequence.cairo +++ b/src/operators/tensor/manipulation/split_to_sequence.cairo @@ -1,3 +1,4 @@ +use core::option::OptionTrait; use orion::operators::tensor::{Tensor, TensorTrait, U32Tensor}; use orion::operators::matrix::{MutMatrixTrait, MutMatrix, MutMatrixImpl}; @@ -45,22 +46,24 @@ fn split_to_sequence, +Drop, +TensorTrait,>( if (keepdims == 0 && !has_split) { let mut splited_t_temp: Array> = array![]; let mut i = 0; - while i != splited_t.len() { - let mut shape: Array = array![]; - let mut j = 0; - let shape_in_splited: Span = *splited_t.at(i).shape; - while j != shape_in_splited.len() { - if (j != axis) { - shape.append(*shape_in_splited.at(j)) - } - 
- j += 1; + while i != splited_t + .len() { + let mut shape: Array = array![]; + let mut j = 0; + let shape_in_splited: Span = *splited_t.at(i).shape; + while j != shape_in_splited + .len() { + if (j != axis) { + shape.append((*shape_in_splited.at(j)).try_into().unwrap()) + } + + j += 1; + }; + + splited_t_temp.append(splited_t[i].reshape(shape.span(), false)); + i += 1; }; - splited_t_temp.append(splited_t[i].reshape(shape.span())); - i += 1; - }; - return splited_t_temp; } splited_t @@ -105,42 +108,45 @@ fn split_num_outputs, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - while i != (*t).shape.len() { - let s: usize = *(*t).shape.at(i); - sli.set(i, 0, 0); - sli.set(i, 1, s); - i += 1; - }; + while i != (*t) + .shape + .len() { + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; let mut i: usize = 0; - while i != split.len() { - let spl = *split.at(i); - sli.set(axis, 0, pos); - pos += spl; - sli.set(axis, 1, pos); - - let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => res, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => res, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, + while i != split + .len() { + let spl = *split.at(i); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => res, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => res, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = Option::None(()); + let 
sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; }; - let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); - let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); - let axes: Option> = Option::None(()); - let steps: Option> = Option::None(()); - let sub_t: Tensor = t.slice(starts, ends, axes, steps); - splited_t.append(sub_t); - i += 1; - }; splited_t } @@ -154,42 +160,46 @@ fn split_has_split, +Drop, +TensorTrait,>( let mut sli: MutMatrix = MutMatrixImpl::new((*t).shape.len(), 2); let mut pos: usize = 0; let mut i = 0; - while i != (*t).shape.len() { - let s: usize = *(*t).shape.at(i); - sli.set(i, 0, 0); - sli.set(i, 1, s); - i += 1; - }; + while i != (*t) + .shape + .len() { + let s: usize = *(*t).shape.at(i); + sli.set(i, 0, 0); + sli.set(i, 1, s); + i += 1; + }; let mut i: usize = 0; - while i != split.data.len() { - let spl: usize = split.at(indices: array![i].span()); - sli.set(axis, 0, pos); - pos += spl; - sli.set(axis, 1, pos); - - let end_ele_0 = match sli.get(axis, 0) { - Option::Some(res) => { res }, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, - }; - let end_ele_1 = match sli.get(axis, 1) { - Option::Some(res) => { res }, - Option::None => { - assert(false, 'Get end_ele_0 is failed'); - 0 - }, + while i != split + .data + .len() { + let spl: usize = split.at(indices: array![i].span()); + sli.set(axis, 0, pos); + pos += spl; + sli.set(axis, 1, pos); + + let end_ele_0 = match sli.get(axis, 0) { + Option::Some(res) => { res }, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let end_ele_1 = match sli.get(axis, 1) { + Option::Some(res) => { res }, + Option::None => { + assert(false, 'Get end_ele_0 is failed'); + 0 + }, + }; + let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); + let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); + let axes: Option> = Option::None(()); + let steps: Option> = 
Option::None(()); + let sub_t: Tensor = t.slice(starts, ends, axes, steps); + splited_t.append(sub_t); + i += 1; }; - let starts: Span = array![sli.get(0, 0).unwrap(), end_ele_0].span(); - let ends: Span = array![sli.get(0, 1).unwrap(), end_ele_1].span(); - let axes: Option> = Option::None(()); - let steps: Option> = Option::None(()); - let sub_t: Tensor = t.slice(starts, ends, axes, steps); - splited_t.append(sub_t); - i += 1; - }; splited_t -} +} \ No newline at end of file diff --git a/src/operators/tensor/math/flatten.cairo b/src/operators/tensor/math/flatten.cairo index a23671b77..dcc7eb78c 100644 --- a/src/operators/tensor/math/flatten.cairo +++ b/src/operators/tensor/math/flatten.cairo @@ -23,5 +23,12 @@ fn flatten>(self: @Tensor, axis: usize) let new_shape_second_axis = (*self.data).len() / new_shape_first_axis; - self.reshape(array![new_shape_first_axis, new_shape_second_axis].span()) + self + .reshape( + array![ + new_shape_first_axis.try_into().unwrap(), new_shape_second_axis.try_into().unwrap() + ] + .span(), + false + ) } diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index e61e826f5..4adfdca91 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -1,3 +1,5 @@ +use core::option::OptionTrait; +use core::traits::TryInto; use orion::numbers::{NumberTrait, I32IntoU32}; use orion::numbers::{FP16x16, FP16x16Impl, FP32x32, FP32x32Impl, FixedTrait}; use orion::operators::tensor::{ @@ -72,8 +74,8 @@ fn layer_normalization< }; let mut shape_matrix = array![]; - shape_matrix.append(row_number); - shape_matrix.append(col_number); + shape_matrix.append(row_number.try_into().unwrap()); + shape_matrix.append(col_number.try_into().unwrap()); // Shape [1, 1] to mutiply one element tensors with 2D matrices let mut shape_one = array![]; @@ -89,7 +91,7 @@ fn layer_normalization< let mut one_tensor = array![]; 
one_tensor.append(NumberTrait::one()); - let x_mat = self.reshape(shape_matrix.span()); + let x_mat = self.reshape(shape_matrix.span(), false); let x_mean = x_mat.reduce_sum(1, true) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); @@ -126,7 +128,15 @@ fn layer_normalization< *scale }; - let Y = y_mat.reshape((*self).shape) * scale; + let mut i = 0; + let mut target_shape: Array = array![]; + while i < (*self) + .shape + .len() { + target_shape.append((*(*self).shape.at(i)).try_into().unwrap()); + i += 1; + }; + let Y = y_mat.reshape(target_shape.span(), false) * scale; let Y = match B { Option::Some(B) => { From b6b1f55b90f1b2b566caa170bd029874ba0365cb Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 18:07:57 +0100 Subject: [PATCH 40/68] test reshape --- nodegen/node/reshape.py | 136 ++++++++++++++++++ tests/nodes.cairo | 9 ++ tests/nodes/reshape_extended_dims.cairo | 20 +++ .../nodes/reshape_extended_dims/input_0.cairo | 38 +++++ .../reshape_extended_dims/output_0.cairo | 39 +++++ tests/nodes/reshape_negative_dim.cairo | 20 +++ .../nodes/reshape_negative_dim/input_0.cairo | 38 +++++ .../nodes/reshape_negative_dim/output_0.cairo | 38 +++++ .../reshape_negative_extended_dims.cairo | 20 +++ .../input_0.cairo | 38 +++++ .../output_0.cairo | 39 +++++ tests/nodes/reshape_one_dim.cairo | 20 +++ tests/nodes/reshape_one_dim/input_0.cairo | 38 +++++ tests/nodes/reshape_one_dim/output_0.cairo | 36 +++++ tests/nodes/reshape_reduced_dims.cairo | 20 +++ .../nodes/reshape_reduced_dims/input_0.cairo | 38 +++++ .../nodes/reshape_reduced_dims/output_0.cairo | 37 +++++ tests/nodes/reshape_reordered_all_dims.cairo | 20 +++ .../reshape_reordered_all_dims/input_0.cairo | 38 +++++ .../reshape_reordered_all_dims/output_0.cairo | 38 +++++ tests/nodes/reshape_reordered_last_dims.cairo | 20 +++ .../reshape_reordered_last_dims/input_0.cairo | 38 +++++ .../output_0.cairo | 38 +++++ .../nodes/reshape_zero_and_negative_dim.cairo | 20 +++ .../input_0.cairo | 14 
++ .../output_0.cairo | 14 ++ tests/nodes/reshape_zero_dim.cairo | 20 +++ tests/nodes/reshape_zero_dim/input_0.cairo | 38 +++++ tests/nodes/reshape_zero_dim/output_0.cairo | 39 +++++ 29 files changed, 961 insertions(+) create mode 100644 nodegen/node/reshape.py create mode 100644 tests/nodes/reshape_extended_dims.cairo create mode 100644 tests/nodes/reshape_extended_dims/input_0.cairo create mode 100644 tests/nodes/reshape_extended_dims/output_0.cairo create mode 100644 tests/nodes/reshape_negative_dim.cairo create mode 100644 tests/nodes/reshape_negative_dim/input_0.cairo create mode 100644 tests/nodes/reshape_negative_dim/output_0.cairo create mode 100644 tests/nodes/reshape_negative_extended_dims.cairo create mode 100644 tests/nodes/reshape_negative_extended_dims/input_0.cairo create mode 100644 tests/nodes/reshape_negative_extended_dims/output_0.cairo create mode 100644 tests/nodes/reshape_one_dim.cairo create mode 100644 tests/nodes/reshape_one_dim/input_0.cairo create mode 100644 tests/nodes/reshape_one_dim/output_0.cairo create mode 100644 tests/nodes/reshape_reduced_dims.cairo create mode 100644 tests/nodes/reshape_reduced_dims/input_0.cairo create mode 100644 tests/nodes/reshape_reduced_dims/output_0.cairo create mode 100644 tests/nodes/reshape_reordered_all_dims.cairo create mode 100644 tests/nodes/reshape_reordered_all_dims/input_0.cairo create mode 100644 tests/nodes/reshape_reordered_all_dims/output_0.cairo create mode 100644 tests/nodes/reshape_reordered_last_dims.cairo create mode 100644 tests/nodes/reshape_reordered_last_dims/input_0.cairo create mode 100644 tests/nodes/reshape_reordered_last_dims/output_0.cairo create mode 100644 tests/nodes/reshape_zero_and_negative_dim.cairo create mode 100644 tests/nodes/reshape_zero_and_negative_dim/input_0.cairo create mode 100644 tests/nodes/reshape_zero_and_negative_dim/output_0.cairo create mode 100644 tests/nodes/reshape_zero_dim.cairo create mode 100644 tests/nodes/reshape_zero_dim/input_0.cairo create 
mode 100644 tests/nodes/reshape_zero_dim/output_0.cairo diff --git a/nodegen/node/reshape.py b/nodegen/node/reshape.py new file mode 100644 index 000000000..417b59125 --- /dev/null +++ b/nodegen/node/reshape.py @@ -0,0 +1,136 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, Tensor, Dtype + +original_shape = [2, 3, 4] +data = np.random.random_sample(original_shape).astype(np.int32) + + +def reshape_reference_implementation( + data: np.ndarray, shape: np.ndarray, allowzero: int = 0 +) -> np.ndarray: + # replace zeros with corresponding dim size + # we need to do this because np.reshape doesn't support 0 by default unless 'allowzero' is set + new_shape = np.copy(shape) + if allowzero == 0: + zeros_index = np.where(shape == 0) + new_shape[zeros_index] = np.array(data.shape)[zeros_index] + reshaped = np.reshape(data, new_shape) + return reshaped + + +class Reshape(RunAll): + @staticmethod + def reshape_reordered_all_dims(): + y = reshape_reference_implementation( + data, np.array([4, 2, 3], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_reordered_all_dims" + make_test([x], y, "input_0.reshape(array![4,2,3].span(), false)", name) + + @staticmethod + def reshape_reordered_last_dims(): + y = reshape_reference_implementation( + data, np.array([2, 4, 3], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_reordered_last_dims" + make_test([x], y, "input_0.reshape(array![2,4,3].span(), false)", name) + + @staticmethod + def reshape_reduced_dims(): + y = reshape_reference_implementation( + data, np.array([2, 12], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_reduced_dims" + make_test([x], y, "input_0.reshape(array![2,12].span(), false)", name) + + @staticmethod + def 
reshape_extended_dims(): + y = reshape_reference_implementation( + data, np.array([2, 3, 2, 2], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_extended_dims" + make_test([x], y, "input_0.reshape(array![2, 3, 2, 2].span(), false)", name) + + @staticmethod + def reshape_one_dim(): + y = reshape_reference_implementation( + data, np.array([24], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_one_dim" + make_test([x], y, "input_0.reshape(array![24].span(), false)", name) + + @staticmethod + def reshape_negative_dim(): + y = reshape_reference_implementation( + data, np.array([2, -1, 2], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_negative_dim" + make_test([x], y, "input_0.reshape(array![2, -1, 2].span(), false)", name) + + @staticmethod + def reshape_negative_extended_dims(): + y = reshape_reference_implementation( + data, np.array([-1, 2, 3, 4], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_negative_extended_dims" + make_test([x], y, "input_0.reshape(array![-1, 2, 3, 4].span(), false)", name) + + @staticmethod + def reshape_zero_dim(): + y = reshape_reference_implementation( + data, np.array([2, 0, 4, 1], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_zero_dim" + make_test([x], y, "input_0.reshape(array![2, 0, 4, 1].span(), false)", name) + + @staticmethod + def reshape_zero_and_negative_dim(): + y = reshape_reference_implementation( + data, np.array([2, 0, 1, -1], dtype=np.int64)) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_zero_and_negative_dim" + 
make_test([x], y, "input_0.reshape(array![2, 0, 1, -1].span(), false)", name) + + @staticmethod + def reshape_zero_and_negative_dim(): + original_shape = [0, 3, 4] + data = np.random.random_sample(original_shape).astype(np.int32) + + y = reshape_reference_implementation( + data, np.array([3, 4, 0], dtype=np.int64), allowzero=1) + + x = Tensor(Dtype.I32, data.shape, data.flatten()) + y = Tensor(Dtype.I32, y.shape, y.flatten()) + + name = "reshape_zero_and_negative_dim" + make_test([x], y, "input_0.reshape(array![3, 4, 0].span(), true)", name) + + \ No newline at end of file diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 7d1b9a0be..5d48a9b62 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1037,3 +1037,12 @@ mod less_i8; mod less_i8_broadcast; mod less_u32; mod less_u32_broadcast; +mod reshape_extended_dims; +mod reshape_negative_dim; +mod reshape_negative_extended_dims; +mod reshape_one_dim; +mod reshape_reduced_dims; +mod reshape_reordered_all_dims; +mod reshape_reordered_last_dims; +mod reshape_zero_and_negative_dim; +mod reshape_zero_dim; diff --git a/tests/nodes/reshape_extended_dims.cairo b/tests/nodes/reshape_extended_dims.cairo new file mode 100644 index 000000000..00564f8d9 --- /dev/null +++ b/tests/nodes/reshape_extended_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_extended_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, 3, 2, 2].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_extended_dims/input_0.cairo b/tests/nodes/reshape_extended_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ 
b/tests/nodes/reshape_extended_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_extended_dims/output_0.cairo b/tests/nodes/reshape_extended_dims/output_0.cairo new file mode 100644 index 000000000..de2f5850b --- /dev/null +++ b/tests/nodes/reshape_extended_dims/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} 
diff --git a/tests/nodes/reshape_negative_dim.cairo b/tests/nodes/reshape_negative_dim.cairo new file mode 100644 index 000000000..cd6921d7b --- /dev/null +++ b/tests/nodes/reshape_negative_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_negative_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, -1, 2].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_negative_dim/input_0.cairo b/tests/nodes/reshape_negative_dim/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_negative_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_dim/output_0.cairo b/tests/nodes/reshape_negative_dim/output_0.cairo new file mode 100644 index 000000000..ad355bfd6 --- /dev/null +++ 
b/tests/nodes/reshape_negative_dim/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(6); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_extended_dims.cairo b/tests/nodes/reshape_negative_extended_dims.cairo new file mode 100644 index 000000000..271cad3f2 --- /dev/null +++ b/tests/nodes/reshape_negative_extended_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_negative_extended_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![-1, 2, 3, 4].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_negative_extended_dims/input_0.cairo b/tests/nodes/reshape_negative_extended_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_negative_extended_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; 
+use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_negative_extended_dims/output_0.cairo b/tests/nodes/reshape_negative_extended_dims/output_0.cairo new file mode 100644 index 000000000..66d21516d --- /dev/null +++ b/tests/nodes/reshape_negative_extended_dims/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_one_dim.cairo b/tests/nodes/reshape_one_dim.cairo 
new file mode 100644 index 000000000..f55ccc386 --- /dev/null +++ b/tests/nodes/reshape_one_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_one_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![24].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_one_dim/input_0.cairo b/tests/nodes/reshape_one_dim/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_one_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_one_dim/output_0.cairo b/tests/nodes/reshape_one_dim/output_0.cairo new file mode 100644 index 000000000..a6ad8efcb --- /dev/null +++ b/tests/nodes/reshape_one_dim/output_0.cairo @@ -0,0 +1,36 @@ +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(24); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reduced_dims.cairo b/tests/nodes/reshape_reduced_dims.cairo new file mode 100644 index 000000000..7952505d1 --- /dev/null +++ b/tests/nodes/reshape_reduced_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_reduced_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2,12].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_reduced_dims/input_0.cairo b/tests/nodes/reshape_reduced_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_reduced_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); 
+ shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reduced_dims/output_0.cairo b/tests/nodes/reshape_reduced_dims/output_0.cairo new file mode 100644 index 000000000..3ab9777df --- /dev/null +++ b/tests/nodes/reshape_reduced_dims/output_0.cairo @@ -0,0 +1,37 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(12); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_all_dims.cairo b/tests/nodes/reshape_reordered_all_dims.cairo new file mode 100644 index 000000000..237c867c2 --- /dev/null +++ b/tests/nodes/reshape_reordered_all_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_reordered_all_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![4,2,3].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_reordered_all_dims/input_0.cairo b/tests/nodes/reshape_reordered_all_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_reordered_all_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_all_dims/output_0.cairo b/tests/nodes/reshape_reordered_all_dims/output_0.cairo new file mode 100644 index 000000000..2308361dc --- /dev/null +++ b/tests/nodes/reshape_reordered_all_dims/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = 
ArrayTrait::::new(); + shape.append(4); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_last_dims.cairo b/tests/nodes/reshape_reordered_last_dims.cairo new file mode 100644 index 000000000..5c5f4fd7e --- /dev/null +++ b/tests/nodes/reshape_reordered_last_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_reordered_last_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2,4,3].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_reordered_last_dims/input_0.cairo b/tests/nodes/reshape_reordered_last_dims/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_reordered_last_dims/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + 
data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_reordered_last_dims/output_0.cairo b/tests/nodes/reshape_reordered_last_dims/output_0.cairo new file mode 100644 index 000000000..bb307aeb5 --- /dev/null +++ b/tests/nodes/reshape_reordered_last_dims/output_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(4); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_and_negative_dim.cairo b/tests/nodes/reshape_zero_and_negative_dim.cairo new file mode 100644 index 000000000..3f4f3230d --- /dev/null +++ b/tests/nodes/reshape_zero_and_negative_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_zero_and_negative_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![3, 4, 0].span(), true); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo b/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo new file mode 100644 index 000000000..f8da52ecd --- /dev/null +++ b/tests/nodes/reshape_zero_and_negative_dim/input_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(0); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo b/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo new file mode 100644 index 000000000..7cec007a8 --- /dev/null +++ b/tests/nodes/reshape_zero_and_negative_dim/output_0.cairo @@ -0,0 +1,14 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(0); + + let mut data = ArrayTrait::new(); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_dim.cairo b/tests/nodes/reshape_zero_dim.cairo new file mode 100644 index 000000000..95d8d6620 --- /dev/null +++ b/tests/nodes/reshape_zero_dim.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use core::array::{ArrayTrait, 
SpanTrait}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::I32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reshape_zero_dim() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reshape(array![2, 0, 4, 1].span(), false); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reshape_zero_dim/input_0.cairo b/tests/nodes/reshape_zero_dim/input_0.cairo new file mode 100644 index 000000000..1bf8d2578 --- /dev/null +++ b/tests/nodes/reshape_zero_dim/input_0.cairo @@ -0,0 +1,38 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reshape_zero_dim/output_0.cairo b/tests/nodes/reshape_zero_dim/output_0.cairo new file mode 100644 index 000000000..4c6823a34 --- /dev/null +++ b/tests/nodes/reshape_zero_dim/output_0.cairo @@ -0,0 +1,39 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + 
shape.append(4); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + TensorTrait::new(shape.span(), data.span()) +} From 4880eca2bc04bfd2319ef887510002208a045033 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Fri, 22 Mar 2024 18:15:56 +0100 Subject: [PATCH 41/68] update doc --- docs/framework/operators/tensor/tensor.reshape.md | 14 ++++++++++---- src/operators/tensor/core.cairo | 14 ++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.reshape.md b/docs/framework/operators/tensor/tensor.reshape.md index b2c8f84eb..ed21f766e 100644 --- a/docs/framework/operators/tensor/tensor.reshape.md +++ b/docs/framework/operators/tensor/tensor.reshape.md @@ -1,15 +1,21 @@ # tensor.reshape ```rust - fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor; ``` -Returns a new tensor with the specified target shape and the same data as the input tensor. +Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second +input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. +At most one dimension of the new shape can be -1. In this case, the value is inferred from +the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case +the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set, +and the new shape includes 0, the dimension will be set explicitly to zero (i.e. 
not taken from input tensor) ## Args * `self`(`@Tensor`) - The input tensor. -* `target_shape`(Span) - A span containing the target shape of the tensor. +* `target_shape`(Span) - A span containing the target shape of the tensor. +* `allowzero`(`bool`) - Indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy. ## Panics @@ -32,7 +38,7 @@ fn reshape_tensor_example() -> Tensor { ); // We can call `reshape` function as follows. - return tensor.reshape(target_shape: array![2, 4].span()); + return tensor.reshape(target_shape: array![2, 4].span(), false); } >>> [[0,1,2,3], [4,5,6,7]] ``` diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 8e2178c79..2fe512bc7 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -558,15 +558,21 @@ trait TensorTrait { /// # tensor.reshape /// /// ```rust - /// fn reshape(self: @Tensor, target_shape: Span) -> Tensor; + /// fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor; /// ``` /// - /// Returns a new tensor with the specified target shape and the same data as the input tensor. + /// Reshape the input tensor similar to numpy.reshape. First input is the data tensor, second + /// input is a shape tensor which specifies the output shape. It outputs the reshaped tensor. + /// At most one dimension of the new shape can be -1. In this case, the value is inferred from + /// the size of the tensor and the remaining dimensions. A dimension could also be 0, in which case + /// the actual dimension value is unchanged (i.e. taken from the input tensor). If 'allowzero' is set, + /// and the new shape includes 0, the dimension will be set explicitly to zero (i.e. not taken from input tensor) /// /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `target_shape`(Span) - A span containing the target shape of the tensor. + /// * `target_shape`(Span) - A span containing the target shape of the tensor. 
+ /// * `allowzero`(`bool`) - Indicates that if any value in the 'shape' input is set to zero, the zero value is honored, similar to NumPy. /// /// ## Panics /// @@ -589,7 +595,7 @@ trait TensorTrait { /// ); /// /// // We can call `reshape` function as follows. - /// return tensor.reshape(target_shape: array![2, 4].span()); + /// return tensor.reshape(target_shape: array![2, 4].span(), false); /// } /// >>> [[0,1,2,3], [4,5,6,7]] /// ``` From 01312e32640d1d4986e6bcf1af85ccb73afd0d53 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 23 Mar 2024 08:24:47 +0100 Subject: [PATCH 42/68] refactor operator --- src/operators/tensor/core.cairo | 4 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- .../tensor/math/gather_elements.cairo | 116 +++++++----------- 13 files changed, 58 insertions(+), 84 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 2fe512bc7..46780007a 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -4230,7 +4230,7 @@ trait TensorTrait { /// # tensor.gather_elements /// /// ```rust - /// fn gather_elements(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; + /// fn gather_elements(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; /// ``` /// /// GatherElements is an indexing operation that produces its output by indexing into the input data tensor at index positions determined by elements of the indices tensor. @@ -4275,7 +4275,7 @@ trait TensorTrait { /// [7. 2. 
3.]] /// ``` /// - fn gather_elements(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; + fn gather_elements(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; /// # tensor.binarizer /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index be929b5b2..165a3af29 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -369,7 +369,7 @@ impl BoolTensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index a635bb84f..9f5e612d7 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -408,7 +408,7 @@ impl Complex64Tensor of TensorTrait { fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 846b9d73f..64ae522fd 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -441,7 +441,7 @@ impl FP16x16Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index ed87491c3..c18ffdf2e 
100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -411,7 +411,7 @@ impl FP16x16WTensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index f215f10f4..0bfef5890 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -438,7 +438,7 @@ impl FP32x32Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 9b8811486..3e29b3d35 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -438,7 +438,7 @@ impl FP64x64Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 6e2e931e2..7927bc9cd 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -426,7 +426,7 @@ impl FP8x23Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, 
indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index dcc27247b..e65e4cfda 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -393,7 +393,7 @@ impl FP8x23WTensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index d0c779e7b..f78e8c2b4 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -431,7 +431,7 @@ impl I32Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index e05b05d81..bcd2e40e4 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -434,7 +434,7 @@ impl I8Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 0723f9cc8..daf9e9d32 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -378,7 +378,7 @@ impl U32Tensor of TensorTrait { } fn gather_elements( - self: @Tensor, indices: Tensor, axis: 
Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { math::gather_elements::gather_elements(self, indices, axis) } diff --git a/src/operators/tensor/math/gather_elements.cairo b/src/operators/tensor/math/gather_elements.cairo index c3793a316..cc8b9ae20 100644 --- a/src/operators/tensor/math/gather_elements.cairo +++ b/src/operators/tensor/math/gather_elements.cairo @@ -1,115 +1,89 @@ use alexandria_data_structures::array_ext::SpanTraitExt; use orion::numbers::NumberTrait; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; +use orion::operators::tensor::{TensorTrait, Tensor}; -/// Cf: TensorTrait::gather docstring +/// Cf: TensorTrait::gather_elements docstring fn gather_elements, impl TCopy: Copy, impl TDrop: Drop,>( - self: @Tensor, indices: Tensor, axis: Option + self: @Tensor, indices: Tensor, axis: Option ) -> Tensor { - let axis = match axis { - Option::Some(val) => val, + let axis: usize = match axis { + Option::Some(val) => { + if val < 0 { + (((*self.shape).len()).try_into().unwrap() + val).try_into().unwrap() + } else { + val.try_into().unwrap() + } + }, Option::None => 0 }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); - let data_rank = (*self.shape).len(); - let indices_rank = (indices.shape).len(); - assert((data_rank == indices_rank) & (indices_rank >= 1), 'must be same rank'); - let axis_shape = *(*self.shape).at(axis); - let ind_max = indices.data.max().unwrap(); - assert(ind_max < axis_shape, 'this index out of bounds'); - - let mut indices_shape = indices.shape; - let mut data_shape = *self.shape; - let mut data_shape_clone = data_shape.clone(); - let mut ind = 0; + // Adjust indices that are negative + let mut adjusted_indices = array![]; + let mut indices_data = indices.data.clone(); loop { - match data_shape.pop_front() { - Option::Some(val) => { - if (ind != axis) { - assert(*val == *indices_shape.at(ind), 'shape mismatch'); - } - ind += 1; + match 
indices_data.pop_front() { + Option::Some(index) => { + let adjusted_index: usize = if *index < 0 { + let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap(); + val + } else { + let val: u32 = (*index).try_into().unwrap(); + val + }; + assert(adjusted_index >= 0 && adjusted_index < axis_shape, 'Index out of bounds'); + adjusted_indices.append(adjusted_index); }, Option::None => { break; } }; }; let mut output_data = array![]; - - let mut outer_loop = data_shape_clone.at(axis); - let mut inner_loop = 1; + let mut data_shape_clone = (*self.shape).clone(); let mut multiplier = 1; + let mut looper = 1; let mut ind = 0; loop { match data_shape_clone.pop_front() { Option::Some(val) => { - inner_loop *= *val; - if (ind >= axis) { + if ind >= axis { multiplier *= *val; } - - ind += 1; - }, - Option::None => { break; } - }; - }; - - let looper = multiplier / *outer_loop; - - if inner_loop != 1 { - inner_loop /= *outer_loop; - } - - let mut multiplier_index = 1; - let mut outer_loop_index = indices_shape.at(axis); - let mut ind = 0; - loop { - match indices_shape.pop_front() { - Option::Some(val) => { - if (ind >= axis) { - multiplier_index *= *val; + if ind > axis { + looper *= *val; } - ind += 1; }, Option::None => { break; } }; }; - let mut data_indices = indices.data; + let inner_loop = multiplier / axis_shape; + let mut adjusted_indices_iter = adjusted_indices.clone(); + let mut i: usize = 0; loop { - match data_indices.pop_front() { - Option::Some(val) => { - if (axis == 0) { - let value = *val * inner_loop.into() + (i % inner_loop); - output_data.append(*self.data[value]); - } - - if ((axis == indices_rank - 1) & (axis != 0)) { - let value = *val + *outer_loop * (i / *outer_loop_index); - output_data.append(*self.data[value]); - } - - if ((axis != indices_rank - 1) & (axis != 0)) { - let value = *val * (looper) + match adjusted_indices_iter.pop_front() { + Option::Some(indice) => { + let value = if axis == 0 { + indice * inner_loop + (i % 
inner_loop) + } else if axis == (*self.shape).len() - 1 { + indice + axis_shape * (i / axis_shape) + } else { + indice * looper + (i % looper) - + (multiplier * (i / multiplier_index)); - output_data.append(*self.data[value]); - } + + (multiplier / axis_shape) * (i / (multiplier / axis_shape)) + }; + output_data.append(*self.data[value]); i += 1; }, Option::None => { break; } }; }; - let mut output_tensor = TensorTrait::::new(indices.shape, output_data.span()); - - output_tensor + TensorTrait::::new(indices.shape, output_data.span()) } From d2c621fd7f1be314e175a3af066f619030f4af7b Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 23 Mar 2024 09:10:05 +0100 Subject: [PATCH 43/68] test operator --- nodegen/node/gather_elements.py | 316 +++------------ tests/nodes.cairo | 19 +- ...xis1.cairo => gather_elements_axis1.cairo} | 16 +- .../nodes/gather_elements_axis1/input_0.cairo | 17 + .../input_1.cairo | 14 +- .../gather_elements_axis1/output_0.cairo | 17 + ...xis2.cairo => gather_elements_axis2.cairo} | 16 +- .../input_0.cairo | 2 +- .../input_1.cairo | 27 +- .../output_0.cairo} | 30 +- ...lt.cairo => gather_elements_default.cairo} | 16 +- .../input_0.cairo | 2 +- .../input_1.cairo | 15 +- .../output_0.cairo | 28 +- .../input_1.cairo | 40 -- .../output_0.cairo | 41 -- .../output_0.cairo | 41 -- .../input_1.cairo | 40 -- .../gather_elements_fp8x23_3d_axis1.cairo | 24 -- .../input_0.cairo | 41 -- .../input_1.cairo | 40 -- .../output_0.cairo | 41 -- .../gather_elements_fp8x23_3d_axis2.cairo | 24 -- .../input_0.cairo | 41 -- .../input_1.cairo | 40 -- .../output_0.cairo | 41 -- .../gather_elements_fp8x23_3d_default.cairo | 24 -- .../input_0.cairo | 41 -- .../output_0.cairo | 41 -- .../nodes/gather_elements_i32_3d_axis1.cairo | 24 -- .../input_0.cairo | 37 -- .../input_1.cairo | 49 --- .../output_0.cairo | 49 --- .../nodes/gather_elements_i32_3d_axis2.cairo | 24 -- .../input_0.cairo | 37 -- .../input_1.cairo | 45 --- .../output_0.cairo | 45 --- .../input_0.cairo | 37 -- 
.../input_1.cairo | 43 -- .../output_0.cairo | 43 -- tests/nodes/gather_elements_i8_3d_axis1.cairo | 24 -- .../gather_elements_i8_3d_axis1/input_0.cairo | 21 - .../output_0.cairo | 21 - .../nodes/gather_elements_i8_3d_default.cairo | 24 -- .../input_0.cairo | 21 - .../output_0.cairo | 21 - ...=> gather_elements_negative_indices.cairo} | 14 +- .../input_0.cairo | 22 ++ .../input_1.cairo | 16 +- .../output_0.cairo | 19 + tests/nodes/gather_elements_u32_axis1.cairo | 22 -- .../gather_elements_u32_axis1/input_0.cairo | 122 ------ .../gather_elements_u32_axis1/input_1.cairo | 194 --------- .../gather_elements_u32_axis1/output_0.cairo | 194 --------- tests/nodes/gather_elements_u32_axis2.cairo | 22 -- .../gather_elements_u32_axis2/input_0.cairo | 122 ------ .../gather_elements_u32_axis2/input_1.cairo | 122 ------ .../gather_elements_u32_axis2/output_0.cairo | 122 ------ tests/nodes/gather_elements_u32_axis3.cairo | 22 -- .../gather_elements_u32_axis3/input_0.cairo | 122 ------ .../gather_elements_u32_axis3/input_1.cairo | 230 ----------- .../gather_elements_u32_axis3/output_0.cairo | 230 ----------- tests/nodes/gather_elements_u32_default.cairo | 22 -- .../gather_elements_u32_default/input_0.cairo | 122 ------ .../gather_elements_u32_default/input_1.cairo | 374 ------------------ .../output_0.cairo | 374 ------------------ 66 files changed, 239 insertions(+), 3908 deletions(-) rename tests/nodes/{gather_elements_fp16x16_3d_axis1.cairo => gather_elements_axis1.cairo} (52%) create mode 100644 tests/nodes/gather_elements_axis1/input_0.cairo rename tests/nodes/{gather_elements_i8_3d_axis1 => gather_elements_axis1}/input_1.cairo (57%) create mode 100644 tests/nodes/gather_elements_axis1/output_0.cairo rename tests/nodes/{gather_elements_fp16x16_3d_axis2.cairo => gather_elements_axis2.cairo} (52%) rename tests/nodes/{gather_elements_fp16x16_3d_axis2 => gather_elements_axis2}/input_0.cairo (96%) rename tests/nodes/{gather_elements_fp16x16_3d_axis2 => 
gather_elements_axis2}/input_1.cairo (83%) rename tests/nodes/{gather_elements_fp16x16_3d_default/input_0.cairo => gather_elements_axis2/output_0.cairo} (71%) rename tests/nodes/{gather_elements_fp16x16_3d_default.cairo => gather_elements_default.cairo} (52%) rename tests/nodes/{gather_elements_fp16x16_3d_axis1 => gather_elements_default}/input_0.cairo (96%) rename tests/nodes/{gather_elements_fp8x23_3d_default => gather_elements_default}/input_1.cairo (87%) rename tests/nodes/{gather_elements_fp16x16_3d_default => gather_elements_default}/output_0.cairo (70%) delete mode 100644 tests/nodes/gather_elements_fp16x16_3d_axis1/input_1.cairo delete mode 100644 tests/nodes/gather_elements_fp16x16_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_elements_fp16x16_3d_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_elements_fp16x16_3d_default/input_1.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis1.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis1/input_1.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis2.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_default.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_default/input_0.cairo delete mode 100644 tests/nodes/gather_elements_fp8x23_3d_default/output_0.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis1.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis1/input_1.cairo delete mode 100644 
tests/nodes/gather_elements_i32_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis2.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_default/input_0.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_default/input_1.cairo delete mode 100644 tests/nodes/gather_elements_i32_3d_default/output_0.cairo delete mode 100644 tests/nodes/gather_elements_i8_3d_axis1.cairo delete mode 100644 tests/nodes/gather_elements_i8_3d_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_elements_i8_3d_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_elements_i8_3d_default.cairo delete mode 100644 tests/nodes/gather_elements_i8_3d_default/input_0.cairo delete mode 100644 tests/nodes/gather_elements_i8_3d_default/output_0.cairo rename tests/nodes/{gather_elements_i32_3d_default.cairo => gather_elements_negative_indices.cairo} (51%) create mode 100644 tests/nodes/gather_elements_negative_indices/input_0.cairo rename tests/nodes/{gather_elements_i8_3d_default => gather_elements_negative_indices}/input_1.cairo (57%) create mode 100644 tests/nodes/gather_elements_negative_indices/output_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis1.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis1/input_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis1/input_1.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis1/output_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis2.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis2/input_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis2/input_1.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis2/output_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis3.cairo delete 
mode 100644 tests/nodes/gather_elements_u32_axis3/input_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis3/input_1.cairo delete mode 100644 tests/nodes/gather_elements_u32_axis3/output_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_default.cairo delete mode 100644 tests/nodes/gather_elements_u32_default/input_0.cairo delete mode 100644 tests/nodes/gather_elements_u32_default/input_1.cairo delete mode 100644 tests/nodes/gather_elements_u32_default/output_0.cairo diff --git a/nodegen/node/gather_elements.py b/nodegen/node/gather_elements.py index 604a666c7..a977212a7 100644 --- a/nodegen/node/gather_elements.py +++ b/nodegen/node/gather_elements.py @@ -13,256 +13,68 @@ class Gather_elements(RunAll): @staticmethod def gather_elements_fp16x16(): - def gather_elements_3D(): - def default(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.random.randint(low = 0,high=2, size=(3,3,3)).astype(np.uint32) - y = gather_elements(x1, x2, axis=0) + def default(): + x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) + x2 = np.random.randint(low = 0,high=2, size=(3,3,3)).astype(np.uint32) + y = gather_elements(x1, x2, axis=0) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_elements_default" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", + name= name) + + def axis1(): + x1 = np.array([[1, 2], [3, 4]], dtype=np.float32) + x2 = np.array([[0, 0], [1, 0]], dtype=np.int32) + y = gather_elements(x1, x2, axis=1) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_elements_axis1" + make_test( + inputs = [x1, x2], output = 
y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))", + name= name) + + def axis2(): + x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) + x2 = np.random.randint(low = 0,high=3, size=(3,3,3)).astype(np.uint32) + y = gather_elements(x1, x2, axis=2) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_elements_axis2" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(2))", + name= name) + + def negative_indices(): + x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32) + x2 = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32) + y = gather_elements(x1, x2, axis=0) + + x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) + x2 = Tensor(Dtype.I32, x2.shape, x2.flatten()) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + + name = "gather_elements_negative_indices" + make_test( + inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", + name= name) + + default() + axis1() + axis2() + negative_indices() - x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "gather_elements_fp16x16_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.random.randint(low = 0,high=3, size=(3,3,3)).astype(np.uint32) - y = gather_elements(x1, x2, axis=1) - - x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) - x2 = Tensor(Dtype.U32, x2.shape, 
x2.flatten()) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "gather_elements_fp16x16_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.random.randint(low = 0,high=3, size=(3,3,3)).astype(np.uint32) - y = gather_elements(x1, x2, axis=2) - - x1 = Tensor(Dtype.FP16x16, x1.shape, to_fp(x1.flatten(), FixedImpl.FP16x16)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "gather_elements_fp16x16_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_elements_3D() - - - @staticmethod - def gather_elements_fp8x23(): - def gather_elements_3D(): - def default(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.random.randint(low = 0,high=2, size=(3,3,3)).astype(np.int64) - y = gather_elements(x1, x2, axis=0) - - x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) - - name = "gather_elements_fp8x23_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.random.randint(low = 0,high=3, size=(3,3,3)).astype(np.int64) - y = gather_elements(x1, x2, axis=1) - - x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) - - name = "gather_elements_fp8x23_3d_axis1" - make_test( - 
inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,27).reshape(3,3,3).astype(np.int64) - x2 = np.random.randint(low = 0,high=3, size=(3,3,3)).astype(np.int64) - y = gather_elements(x1, x2, axis=2) - - x1 = Tensor(Dtype.FP8x23, x1.shape, to_fp(x1.flatten(), FixedImpl.FP8x23)) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, to_fp(y.flatten(), FixedImpl.FP8x23)) - - name = "gather_elements_fp8x23_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_elements_3D() - - - @staticmethod - def gather_elements_i8(): - def gather_elements_3D(): - def default(): - x1 = np.arange(0,9).reshape(3,3).astype(np.int8) - x2 = np.random.randint(low = 0,high=2, size=(3,3)).astype(np.int8) - y = gather_elements(x1, x2, axis=0) - - x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I8, y.shape, y.flatten()) - - name = "gather_elements_i8_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,9).reshape(3,3).astype(np.int8) - x2 = np.random.randint(low = 0,high=2, size=(3,3)).astype(np.int8) - y = gather_elements(x1, x2, axis=1) - - x1 = Tensor(Dtype.I8, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I8, y.shape, y.flatten()) - - name = "gather_elements_i8_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))", - name= name) - - default() - axis1() - gather_elements_3D() - - - @staticmethod - def gather_elements_i32(): - def gather_elements_3D(): - def default(): - x1 = 
np.arange(0,24).reshape(4,2,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=2, size=(5,2,3)).astype(np.int32) - y = gather_elements(x1, x2, axis=0) - - x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "gather_elements_i32_3d_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,24).reshape(4,2,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=2, size=(4,3,3)).astype(np.int32) - y = gather_elements(x1, x2, axis=1) - - x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "gather_elements_i32_3d_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,24).reshape(4,2,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=2, size=(4,2,4)).astype(np.int32) - y = gather_elements(x1, x2, axis=2) - - x1 = Tensor(Dtype.I32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "gather_elements_i32_3d_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(2))", - name= name) - - default() - axis1() - axis2() - gather_elements_3D() - - @staticmethod - def gather_elements_u32(): - def gather_elements_3D(): - def default(): - x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=3, size=(10,3,4,3)).astype(np.int32) - y = gather_elements(x1, x2, axis=0) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = 
"gather_elements_u32_default" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(0))", - name= name) - - def axis1(): - x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=3, size=(3,5,4,3)).astype(np.int32) - y = gather_elements(x1, x2, axis=1) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "gather_elements_u32_axis1" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(1))", - name= name) - - def axis2(): - x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=3, size=(3,3,4,3)).astype(np.int32) - y = gather_elements(x1, x2, axis=2) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "gather_elements_u32_axis2" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(2))", - name= name) - - def axis3(): - x1 = np.arange(0,108).reshape(3,3,4,3).astype(np.int32) - x2 = np.random.randint(low = 0,high=3, size=(3,3,4,6)).astype(np.int32) - y = gather_elements(x1, x2, axis=3) - - x1 = Tensor(Dtype.U32, x1.shape, x1.flatten()) - x2 = Tensor(Dtype.U32, x2.shape, x2.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "gather_elements_u32_axis3" - make_test( - inputs = [x1, x2], output = y, func_sig = "input_0.gather_elements(indices:input_1, axis:Option::Some(3))", - name= name) - - default() - axis1() - axis2() - axis3() - gather_elements_3D() \ No newline at end of file diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 5d48a9b62..122cb8d28 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -651,21 +651,6 @@ // mod reduce_prod_u32_2D_default; // mod 
reduce_prod_u32_2D_keepdims; // mod reduce_prod_u32_2D_axis_1; -// mod gather_elements_fp16x16_3d_default; -// mod gather_elements_fp16x16_3d_axis1; -// mod gather_elements_fp16x16_3d_axis2; -// mod gather_elements_fp8x23_3d_default; -// mod gather_elements_fp8x23_3d_axis1; -// mod gather_elements_fp8x23_3d_axis2; -// mod gather_elements_i8_3d_default; -// mod gather_elements_i8_3d_axis1; -// mod gather_elements_i32_3d_default; -// mod gather_elements_i32_3d_axis1; -// mod gather_elements_i32_3d_axis2; -// mod gather_elements_u32_default; -// mod gather_elements_u32_axis1; -// mod gather_elements_u32_axis2; -// mod gather_elements_u32_axis3; // mod sequence_length_fp16x16; // mod sequence_length_fp16x16_broadcast; // mod sequence_length_fp8x23; @@ -1046,3 +1031,7 @@ mod reshape_reordered_all_dims; mod reshape_reordered_last_dims; mod reshape_zero_and_negative_dim; mod reshape_zero_dim; +mod gather_elements_default; +mod gather_elements_axis1; +mod gather_elements_axis2; +mod gather_elements_negative_indices; diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis1.cairo b/tests/nodes/gather_elements_axis1.cairo similarity index 52% rename from tests/nodes/gather_elements_fp16x16_3d_axis1.cairo rename to tests/nodes/gather_elements_axis1.cairo index 6107612c7..82b08e271 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_axis1.cairo +++ b/tests/nodes/gather_elements_axis1.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use 
orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_gather_elements_fp16x16_3d_axis1() { +fn test_gather_elements_axis1() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(1)); + let y_0 = input_0.gather_elements(indices:input_1, axis:Option::Some(1)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_elements_axis1/input_0.cairo b/tests/nodes/gather_elements_axis1/input_0.cairo new file mode 100644 index 000000000..d74280ae9 --- /dev/null +++ b/tests/nodes/gather_elements_axis1/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_elements_i8_3d_axis1/input_1.cairo b/tests/nodes/gather_elements_axis1/input_1.cairo similarity index 57% rename from tests/nodes/gather_elements_i8_3d_axis1/input_1.cairo rename to tests/nodes/gather_elements_axis1/input_1.cairo index 477bcfe1d..ef76c8bf4 100644 --- a/tests/nodes/gather_elements_i8_3d_axis1/input_1.cairo +++ b/tests/nodes/gather_elements_axis1/input_1.cairo @@ -1,21 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use 
orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); + shape.append(2); + shape.append(2); let mut data = ArrayTrait::new(); data.append(0); - data.append(1); - data.append(0); data.append(0); data.append(1); data.append(0); - data.append(1); - data.append(1); - data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/gather_elements_axis1/output_0.cairo b/tests/nodes/gather_elements_axis1/output_0.cairo new file mode 100644 index 000000000..440047d6e --- /dev/null +++ b/tests/nodes/gather_elements_axis1/output_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis2.cairo b/tests/nodes/gather_elements_axis2.cairo similarity index 52% rename from tests/nodes/gather_elements_fp16x16_3d_axis2.cairo rename to tests/nodes/gather_elements_axis2.cairo index e44e6b6c1..0e0b7caea 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_axis2.cairo +++ b/tests/nodes/gather_elements_axis2.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use 
orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_gather_elements_fp16x16_3d_axis2() { +fn test_gather_elements_axis2() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(2)); + let y_0 = input_0.gather_elements(indices:input_1, axis:Option::Some(2)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis2/input_0.cairo b/tests/nodes/gather_elements_axis2/input_0.cairo similarity index 96% rename from tests/nodes/gather_elements_fp16x16_3d_axis2/input_0.cairo rename to tests/nodes/gather_elements_axis2/input_0.cairo index 2417c999e..5d8deafac 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_axis2/input_0.cairo +++ b/tests/nodes/gather_elements_axis2/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis2/input_1.cairo b/tests/nodes/gather_elements_axis2/input_1.cairo similarity index 83% rename from tests/nodes/gather_elements_fp16x16_3d_axis2/input_1.cairo rename to tests/nodes/gather_elements_axis2/input_1.cairo index d2c0a00b1..61f544307 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_axis2/input_1.cairo +++ b/tests/nodes/gather_elements_axis2/input_1.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(2); + data.append(1); + data.append(0); data.append(0); data.append(2); data.append(0); - data.append(1); data.append(0); - data.append(2); data.append(0); - data.append(1); - data.append(1); - data.append(2); data.append(2); data.append(0); - data.append(2); + data.append(1); data.append(0); data.append(2); - data.append(2); + data.append(1); data.append(0); data.append(2); - data.append(1); data.append(0); data.append(0); data.append(1); - data.append(0); + data.append(2); + data.append(2); + data.append(2); data.append(1); - data.append(0); + data.append(2); + data.append(2); + data.append(2); + data.append(2); data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/gather_elements_fp16x16_3d_default/input_0.cairo b/tests/nodes/gather_elements_axis2/output_0.cairo similarity index 71% rename from tests/nodes/gather_elements_fp16x16_3d_default/input_0.cairo rename to tests/nodes/gather_elements_axis2/output_0.cairo index 2417c999e..51cbca04f 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_default/input_0.cairo +++ b/tests/nodes/gather_elements_axis2/output_0.cairo @@ -1,41 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; -fn input_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, 
sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 458752, sign: false }); data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); data.append(FP16x16 { mag: 917504, sign: false }); data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); data.append(FP16x16 { mag: 1048576, sign: false }); - data.append(FP16x16 { mag: 1114112, sign: false }); - data.append(FP16x16 { mag: 1179648, sign: false }); - data.append(FP16x16 { mag: 1245184, sign: false }); data.append(FP16x16 { mag: 1310720, sign: false }); - data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); data.append(FP16x16 { mag: 1441792, sign: false }); data.append(FP16x16 { mag: 1507328, sign: false }); - data.append(FP16x16 { mag: 1572864, sign: false }); - data.append(FP16x16 { mag: 1638400, sign: false }); + data.append(FP16x16 { mag: 1507328, sign: false }); data.append(FP16x16 { mag: 1703936, sign: false }); + data.append(FP16x16 { mag: 1703936, sign: 
false }); + data.append(FP16x16 { mag: 1572864, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/gather_elements_fp16x16_3d_default.cairo b/tests/nodes/gather_elements_default.cairo similarity index 52% rename from tests/nodes/gather_elements_fp16x16_3d_default.cairo rename to tests/nodes/gather_elements_default.cairo index 7d1c54b1e..9d1a099c1 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_default.cairo +++ b/tests/nodes/gather_elements_default.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_gather_elements_fp16x16_3d_default() { +fn test_gather_elements_default() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(0)); + let y_0 = input_0.gather_elements(indices:input_1, axis:Option::Some(0)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis1/input_0.cairo b/tests/nodes/gather_elements_default/input_0.cairo similarity index 96% rename from tests/nodes/gather_elements_fp16x16_3d_axis1/input_0.cairo rename to tests/nodes/gather_elements_default/input_0.cairo index 2417c999e..5d8deafac 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_axis1/input_0.cairo +++ 
b/tests/nodes/gather_elements_default/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { diff --git a/tests/nodes/gather_elements_fp8x23_3d_default/input_1.cairo b/tests/nodes/gather_elements_default/input_1.cairo similarity index 87% rename from tests/nodes/gather_elements_fp8x23_3d_default/input_1.cairo rename to tests/nodes/gather_elements_default/input_1.cairo index 63526f247..707ba986d 100644 --- a/tests/nodes/gather_elements_fp8x23_3d_default/input_1.cairo +++ b/tests/nodes/gather_elements_default/input_1.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); @@ -11,6 +12,8 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(0); + data.append(0); + data.append(1); data.append(1); data.append(0); data.append(0); @@ -20,21 +23,19 @@ fn input_1() -> Tensor { data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(1); - data.append(1); data.append(1); data.append(0); data.append(1); data.append(0); data.append(0); - data.append(0); data.append(1); data.append(0); data.append(0); - data.append(1); data.append(0); data.append(0); data.append(0); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/gather_elements_fp16x16_3d_default/output_0.cairo b/tests/nodes/gather_elements_default/output_0.cairo similarity index 70% rename from 
tests/nodes/gather_elements_fp16x16_3d_default/output_0.cairo rename to tests/nodes/gather_elements_default/output_0.cairo index d37ee87d6..0fb918722 100644 --- a/tests/nodes/gather_elements_fp16x16_3d_default/output_0.cairo +++ b/tests/nodes/gather_elements_default/output_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn output_0() -> Tensor { @@ -10,31 +10,31 @@ fn output_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 786432, sign: false }); data.append(FP16x16 { mag: 851968, sign: false }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 983040, sign: false }); - data.append(FP16x16 { mag: 1048576, sign: false }); - data.append(FP16x16 { mag: 1114112, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 655360, sign: false }); data.append(FP16x16 { mag: 720896, sign: false }); data.append(FP16x16 { mag: 786432, sign: false }); data.append(FP16x16 { mag: 851968, sign: false }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 1048576, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + 
data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); data.append(FP16x16 { mag: 524288, sign: false }); data.append(FP16x16 { mag: 589824, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); data.append(FP16x16 { mag: 327680, sign: false }); - data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); data.append(FP16x16 { mag: 1048576, sign: false }); data.append(FP16x16 { mag: 1114112, sign: false }); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis1/input_1.cairo b/tests/nodes/gather_elements_fp16x16_3d_axis1/input_1.cairo deleted file mode 100644 index a8dddea3d..000000000 --- a/tests/nodes/gather_elements_fp16x16_3d_axis1/input_1.cairo +++ /dev/null @@ -1,40 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git 
a/tests/nodes/gather_elements_fp16x16_3d_axis1/output_0.cairo b/tests/nodes/gather_elements_fp16x16_3d_axis1/output_0.cairo deleted file mode 100644 index 843805c3c..000000000 --- a/tests/nodes/gather_elements_fp16x16_3d_axis1/output_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 458752, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - data.append(FP16x16 { mag: 851968, sign: false }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 589824, sign: false }); - data.append(FP16x16 { mag: 1048576, sign: false }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 589824, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 1179648, sign: false }); - data.append(FP16x16 { mag: 1638400, sign: false }); - data.append(FP16x16 { mag: 1703936, sign: false }); - data.append(FP16x16 { mag: 1376256, sign: false }); - data.append(FP16x16 { mag: 1245184, sign: false }); - data.append(FP16x16 { mag: 1310720, sign: false }); - data.append(FP16x16 { mag: 1179648, sign: false }); - data.append(FP16x16 { mag: 
1638400, sign: false }); - data.append(FP16x16 { mag: 1703936, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp16x16_3d_axis2/output_0.cairo b/tests/nodes/gather_elements_fp16x16_3d_axis2/output_0.cairo deleted file mode 100644 index fe0cb4f6b..000000000 --- a/tests/nodes/gather_elements_fp16x16_3d_axis2/output_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - data.append(FP16x16 { mag: 524288, sign: false }); - data.append(FP16x16 { mag: 393216, sign: false }); - data.append(FP16x16 { mag: 458752, sign: false }); - data.append(FP16x16 { mag: 655360, sign: false }); - data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 720896, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 786432, sign: false }); - data.append(FP16x16 { mag: 1114112, sign: false }); - data.append(FP16x16 { mag: 1114112, sign: false }); - data.append(FP16x16 { mag: 983040, sign: false }); - data.append(FP16x16 { mag: 1310720, sign: false }); - data.append(FP16x16 { mag: 1245184, sign: false }); - data.append(FP16x16 { mag: 1179648, sign: false }); - data.append(FP16x16 { mag: 1376256, sign: false }); - data.append(FP16x16 { mag: 1441792, sign: false 
}); - data.append(FP16x16 { mag: 1376256, sign: false }); - data.append(FP16x16 { mag: 1638400, sign: false }); - data.append(FP16x16 { mag: 1572864, sign: false }); - data.append(FP16x16 { mag: 1572864, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp16x16_3d_default/input_1.cairo b/tests/nodes/gather_elements_fp16x16_3d_default/input_1.cairo deleted file mode 100644 index a8ee056f2..000000000 --- a/tests/nodes/gather_elements_fp16x16_3d_default/input_1.cairo +++ /dev/null @@ -1,40 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis1.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis1.cairo deleted file mode 100644 index fbecd8cd1..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis1.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_fp8x23_3d_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis1/input_0.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis1/input_0.cairo deleted file mode 100644 index ed60e2f46..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis1/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: 
false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis1/input_1.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis1/input_1.cairo deleted file mode 100644 index f9c244b79..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis1/input_1.cairo +++ /dev/null @@ -1,40 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis1/output_0.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis1/output_0.cairo deleted file mode 100644 index 126c55bfa..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis1/output_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis2.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis2.cairo deleted file mode 100644 index 
e08b0b07c..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis2.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_fp8x23_3d_axis2() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(2)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis2/input_0.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis2/input_0.cairo deleted file mode 100644 index ed60e2f46..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis2/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - 
data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis2/input_1.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis2/input_1.cairo deleted file mode 100644 index f36baa09d..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis2/input_1.cairo +++ /dev/null @@ -1,40 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - 
data.append(0); - data.append(2); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_axis2/output_0.cairo b/tests/nodes/gather_elements_fp8x23_3d_axis2/output_0.cairo deleted file mode 100644 index 1778c6ea3..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_axis2/output_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 176160768, 
sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_default.cairo b/tests/nodes/gather_elements_fp8x23_3d_default.cairo deleted file mode 100644 index ccf76f1f0..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_default.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_fp8x23_3d_default() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(0)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_default/input_0.cairo b/tests/nodes/gather_elements_fp8x23_3d_default/input_0.cairo deleted file mode 100644 index ed60e2f46..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_default/input_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: 
false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 125829120, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 150994944, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: false }); - data.append(FP8x23 { mag: 167772160, sign: false }); - data.append(FP8x23 { mag: 176160768, sign: false }); - data.append(FP8x23 { mag: 184549376, sign: false }); - data.append(FP8x23 { mag: 192937984, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: false }); - data.append(FP8x23 { mag: 209715200, sign: false }); - data.append(FP8x23 { mag: 218103808, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_fp8x23_3d_default/output_0.cairo b/tests/nodes/gather_elements_fp8x23_3d_default/output_0.cairo deleted file mode 100644 index 4acfdfe3d..000000000 --- a/tests/nodes/gather_elements_fp8x23_3d_default/output_0.cairo +++ /dev/null @@ -1,41 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - 
shape.append(3); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 142606336, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 100663296, sign: false }); - data.append(FP8x23 { mag: 109051904, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 134217728, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 92274688, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - data.append(FP8x23 { mag: 117440512, sign: false }); - data.append(FP8x23 { mag: 50331648, sign: false }); - data.append(FP8x23 { mag: 58720256, sign: false }); - data.append(FP8x23 { mag: 67108864, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_axis1.cairo b/tests/nodes/gather_elements_i32_3d_axis1.cairo deleted file mode 100644 index 4295d1451..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis1.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_i32_3d_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_i32_3d_axis1/input_0.cairo b/tests/nodes/gather_elements_i32_3d_axis1/input_0.cairo deleted file mode 100644 index a2ac80e2e..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis1/input_0.cairo +++ /dev/null @@ -1,37 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_axis1/input_1.cairo b/tests/nodes/gather_elements_i32_3d_axis1/input_1.cairo deleted file mode 100644 index 92e1ce88d..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis1/input_1.cairo +++ /dev/null @@ -1,49 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_axis1/output_0.cairo b/tests/nodes/gather_elements_i32_3d_axis1/output_0.cairo deleted file mode 100644 index dfd00a470..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis1/output_0.cairo +++ /dev/null @@ -1,49 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(4); - data.append(2); - data.append(0); - data.append(1); - data.append(5); - data.append(6); - data.append(10); - data.append(11); - data.append(9); - data.append(7); - data.append(8); - data.append(6); - data.append(7); - data.append(8); - data.append(15); - data.append(16); - data.append(14); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(14); 
- data.append(18); - data.append(22); - data.append(20); - data.append(21); - data.append(22); - data.append(20); - data.append(21); - data.append(19); - data.append(20); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_axis2.cairo b/tests/nodes/gather_elements_i32_3d_axis2.cairo deleted file mode 100644 index 58dd1ce18..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis2.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_i32_3d_axis2() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(2)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_i32_3d_axis2/input_0.cairo b/tests/nodes/gather_elements_i32_3d_axis2/input_0.cairo deleted file mode 100644 index a2ac80e2e..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis2/input_0.cairo +++ /dev/null @@ -1,37 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - 
data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_axis2/input_1.cairo b/tests/nodes/gather_elements_i32_3d_axis2/input_1.cairo deleted file mode 100644 index ab202801d..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis2/input_1.cairo +++ /dev/null @@ -1,45 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(2); - shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_axis2/output_0.cairo b/tests/nodes/gather_elements_i32_3d_axis2/output_0.cairo deleted file mode 100644 index a20f34592..000000000 --- a/tests/nodes/gather_elements_i32_3d_axis2/output_0.cairo +++ /dev/null @@ -1,45 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(2); 
- shape.append(4); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(4); - data.append(4); - data.append(3); - data.append(3); - data.append(6); - data.append(7); - data.append(7); - data.append(6); - data.append(9); - data.append(10); - data.append(9); - data.append(9); - data.append(13); - data.append(12); - data.append(12); - data.append(13); - data.append(15); - data.append(16); - data.append(15); - data.append(16); - data.append(19); - data.append(19); - data.append(18); - data.append(18); - data.append(21); - data.append(22); - data.append(21); - data.append(21); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_default/input_0.cairo b/tests/nodes/gather_elements_i32_3d_default/input_0.cairo deleted file mode 100644 index a2ac80e2e..000000000 --- a/tests/nodes/gather_elements_i32_3d_default/input_0.cairo +++ /dev/null @@ -1,37 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(4); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_default/input_1.cairo b/tests/nodes/gather_elements_i32_3d_default/input_1.cairo deleted file mode 100644 index 8a1527e8a..000000000 --- 
a/tests/nodes/gather_elements_i32_3d_default/input_1.cairo +++ /dev/null @@ -1,43 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(5); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_default/output_0.cairo b/tests/nodes/gather_elements_i32_3d_default/output_0.cairo deleted file mode 100644 index 244b8aac7..000000000 --- a/tests/nodes/gather_elements_i32_3d_default/output_0.cairo +++ /dev/null @@ -1,43 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(5); - shape.append(2); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(7); - data.append(2); - data.append(9); - data.append(10); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(4); - data.append(5); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(10); - data.append(5); - data.append(0); - data.append(7); - data.append(2); - data.append(3); - 
data.append(10); - data.append(5); - data.append(0); - data.append(7); - data.append(8); - data.append(9); - data.append(4); - data.append(11); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i8_3d_axis1.cairo b/tests/nodes/gather_elements_i8_3d_axis1.cairo deleted file mode 100644 index c04ea22a4..000000000 --- a/tests/nodes/gather_elements_i8_3d_axis1.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_i8_3d_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_i8_3d_axis1/input_0.cairo b/tests/nodes/gather_elements_i8_3d_axis1/input_0.cairo deleted file mode 100644 index cdb58470f..000000000 --- a/tests/nodes/gather_elements_i8_3d_axis1/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i8_3d_axis1/output_0.cairo 
b/tests/nodes/gather_elements_i8_3d_axis1/output_0.cairo deleted file mode 100644 index 382eae98b..000000000 --- a/tests/nodes/gather_elements_i8_3d_axis1/output_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(0); - data.append(3); - data.append(4); - data.append(3); - data.append(7); - data.append(7); - data.append(7); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i8_3d_default.cairo b/tests/nodes/gather_elements_i8_3d_default.cairo deleted file mode 100644 index 2ee5ae793..000000000 --- a/tests/nodes/gather_elements_i8_3d_default.cairo +++ /dev/null @@ -1,24 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_i8_3d_default() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(0)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_i8_3d_default/input_0.cairo b/tests/nodes/gather_elements_i8_3d_default/input_0.cairo deleted file mode 100644 index cdb58470f..000000000 --- a/tests/nodes/gather_elements_i8_3d_default/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i8_3d_default/output_0.cairo b/tests/nodes/gather_elements_i8_3d_default/output_0.cairo deleted file mode 100644 index 264b0285c..000000000 --- a/tests/nodes/gather_elements_i8_3d_default/output_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(3); - data.append(4); - data.append(2); - data.append(3); - data.append(1); - data.append(5); - data.append(3); - data.append(1); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_i32_3d_default.cairo b/tests/nodes/gather_elements_negative_indices.cairo similarity index 51% rename from tests/nodes/gather_elements_i32_3d_default.cairo rename to tests/nodes/gather_elements_negative_indices.cairo index c09b8ebe8..0aff55566 100644 --- a/tests/nodes/gather_elements_i32_3d_default.cairo +++ b/tests/nodes/gather_elements_negative_indices.cairo @@ -4,21 +4,21 @@ mod output_0; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorSub}; -use orion::operators::tensor::U32TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_gather_elements_i32_3d_default() { +fn test_gather_elements_negative_indices() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(0)); + let y_0 = input_0.gather_elements(indices:input_1, axis:Option::Some(0)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/gather_elements_negative_indices/input_0.cairo b/tests/nodes/gather_elements_negative_indices/input_0.cairo new file mode 100644 index 000000000..68c12b946 --- /dev/null +++ b/tests/nodes/gather_elements_negative_indices/input_0.cairo @@ -0,0 +1,22 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/gather_elements_i8_3d_default/input_1.cairo b/tests/nodes/gather_elements_negative_indices/input_1.cairo similarity index 57% rename from 
tests/nodes/gather_elements_i8_3d_default/input_1.cairo rename to tests/nodes/gather_elements_negative_indices/input_1.cairo index f1fca4a90..915663d52 100644 --- a/tests/nodes/gather_elements_i8_3d_default/input_1.cairo +++ b/tests/nodes/gather_elements_negative_indices/input_1.cairo @@ -1,20 +1,18 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_1() -> Tensor { +fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); + shape.append(2); shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(1); + data.append(-1); + data.append(-2); data.append(0); - data.append(1); - data.append(1); + data.append(-2); data.append(0); data.append(0); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/gather_elements_negative_indices/output_0.cairo b/tests/nodes/gather_elements_negative_indices/output_0.cairo new file mode 100644 index 000000000..5f1934376 --- /dev/null +++ b/tests/nodes/gather_elements_negative_indices/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/gather_elements_u32_axis1.cairo b/tests/nodes/gather_elements_u32_axis1.cairo deleted file mode 100644 index c1926f886..000000000 --- a/tests/nodes/gather_elements_u32_axis1.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_u32_axis1() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(1)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_u32_axis1/input_0.cairo b/tests/nodes/gather_elements_u32_axis1/input_0.cairo deleted file mode 100644 index 448e91a7e..000000000 --- a/tests/nodes/gather_elements_u32_axis1/input_0.cairo +++ /dev/null @@ -1,122 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - 
data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - data.append(36); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(43); - data.append(44); - data.append(45); - data.append(46); - data.append(47); - data.append(48); - data.append(49); - data.append(50); - data.append(51); - data.append(52); - data.append(53); - data.append(54); - data.append(55); - data.append(56); - data.append(57); - data.append(58); - data.append(59); - data.append(60); - data.append(61); - data.append(62); - data.append(63); - data.append(64); - data.append(65); - data.append(66); - data.append(67); - data.append(68); - data.append(69); - data.append(70); - data.append(71); - data.append(72); - data.append(73); - data.append(74); - data.append(75); - data.append(76); - data.append(77); - data.append(78); - data.append(79); - data.append(80); - data.append(81); - data.append(82); - data.append(83); - data.append(84); - data.append(85); - data.append(86); - data.append(87); - data.append(88); - data.append(89); - data.append(90); - data.append(91); - data.append(92); - data.append(93); - data.append(94); - data.append(95); - data.append(96); - data.append(97); - data.append(98); - data.append(99); - data.append(100); - data.append(101); - data.append(102); - data.append(103); - data.append(104); - data.append(105); - data.append(106); - data.append(107); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis1/input_1.cairo b/tests/nodes/gather_elements_u32_axis1/input_1.cairo deleted file mode 100644 index ab2deb4c3..000000000 --- a/tests/nodes/gather_elements_u32_axis1/input_1.cairo +++ /dev/null @@ -1,194 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = 
ArrayTrait::::new(); - shape.append(3); - shape.append(5); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - 
data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis1/output_0.cairo b/tests/nodes/gather_elements_u32_axis1/output_0.cairo deleted file mode 100644 index a059c88ba..000000000 --- a/tests/nodes/gather_elements_u32_axis1/output_0.cairo +++ /dev/null @@ -1,194 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(5); - shape.append(4); - shape.append(3); - - 
let mut data = ArrayTrait::new(); - data.append(24); - data.append(1); - data.append(26); - data.append(27); - data.append(16); - data.append(5); - data.append(30); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(25); - data.append(14); - data.append(27); - data.append(4); - data.append(17); - data.append(18); - data.append(19); - data.append(8); - data.append(33); - data.append(34); - data.append(23); - data.append(0); - data.append(25); - data.append(26); - data.append(15); - data.append(4); - data.append(17); - data.append(6); - data.append(7); - data.append(32); - data.append(9); - data.append(22); - data.append(11); - data.append(12); - data.append(13); - data.append(2); - data.append(27); - data.append(28); - data.append(29); - data.append(18); - data.append(7); - data.append(8); - data.append(21); - data.append(34); - data.append(11); - data.append(0); - data.append(25); - data.append(26); - data.append(27); - data.append(16); - data.append(29); - data.append(30); - data.append(7); - data.append(32); - data.append(33); - data.append(10); - data.append(23); - data.append(48); - data.append(37); - data.append(50); - data.append(51); - data.append(64); - data.append(65); - data.append(42); - data.append(67); - data.append(68); - data.append(69); - data.append(70); - data.append(71); - data.append(60); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(55); - data.append(56); - data.append(69); - data.append(46); - data.append(47); - data.append(60); - data.append(49); - data.append(62); - data.append(51); - data.append(52); - data.append(65); - data.append(42); - data.append(55); - data.append(44); - data.append(69); - data.append(58); - data.append(71); - data.append(60); - data.append(61); - data.append(38); - data.append(39); - data.append(52); - data.append(53); - data.append(42); - data.append(55); - 
data.append(68); - data.append(69); - data.append(46); - data.append(47); - data.append(36); - data.append(49); - data.append(38); - data.append(51); - data.append(64); - data.append(53); - data.append(66); - data.append(43); - data.append(44); - data.append(57); - data.append(70); - data.append(71); - data.append(84); - data.append(73); - data.append(86); - data.append(75); - data.append(76); - data.append(101); - data.append(102); - data.append(79); - data.append(104); - data.append(93); - data.append(94); - data.append(95); - data.append(72); - data.append(85); - data.append(98); - data.append(75); - data.append(100); - data.append(77); - data.append(90); - data.append(91); - data.append(92); - data.append(81); - data.append(94); - data.append(83); - data.append(96); - data.append(85); - data.append(86); - data.append(75); - data.append(88); - data.append(77); - data.append(102); - data.append(91); - data.append(92); - data.append(81); - data.append(106); - data.append(107); - data.append(84); - data.append(73); - data.append(86); - data.append(99); - data.append(76); - data.append(77); - data.append(90); - data.append(79); - data.append(92); - data.append(105); - data.append(106); - data.append(83); - data.append(96); - data.append(73); - data.append(98); - data.append(75); - data.append(100); - data.append(101); - data.append(102); - data.append(79); - data.append(80); - data.append(81); - data.append(106); - data.append(107); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis2.cairo b/tests/nodes/gather_elements_u32_axis2.cairo deleted file mode 100644 index 66a23c66b..000000000 --- a/tests/nodes/gather_elements_u32_axis2.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_u32_axis2() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(2)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_u32_axis2/input_0.cairo b/tests/nodes/gather_elements_u32_axis2/input_0.cairo deleted file mode 100644 index 448e91a7e..000000000 --- a/tests/nodes/gather_elements_u32_axis2/input_0.cairo +++ /dev/null @@ -1,122 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - data.append(36); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(43); - data.append(44); - data.append(45); - data.append(46); - data.append(47); - data.append(48); - data.append(49); - data.append(50); - data.append(51); - data.append(52); - data.append(53); - data.append(54); - data.append(55); - 
data.append(56); - data.append(57); - data.append(58); - data.append(59); - data.append(60); - data.append(61); - data.append(62); - data.append(63); - data.append(64); - data.append(65); - data.append(66); - data.append(67); - data.append(68); - data.append(69); - data.append(70); - data.append(71); - data.append(72); - data.append(73); - data.append(74); - data.append(75); - data.append(76); - data.append(77); - data.append(78); - data.append(79); - data.append(80); - data.append(81); - data.append(82); - data.append(83); - data.append(84); - data.append(85); - data.append(86); - data.append(87); - data.append(88); - data.append(89); - data.append(90); - data.append(91); - data.append(92); - data.append(93); - data.append(94); - data.append(95); - data.append(96); - data.append(97); - data.append(98); - data.append(99); - data.append(100); - data.append(101); - data.append(102); - data.append(103); - data.append(104); - data.append(105); - data.append(106); - data.append(107); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis2/input_1.cairo b/tests/nodes/gather_elements_u32_axis2/input_1.cairo deleted file mode 100644 index 172e6d020..000000000 --- a/tests/nodes/gather_elements_u32_axis2/input_1.cairo +++ /dev/null @@ -1,122 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); 
- data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis2/output_0.cairo b/tests/nodes/gather_elements_u32_axis2/output_0.cairo deleted file mode 100644 index da57b3055..000000000 --- a/tests/nodes/gather_elements_u32_axis2/output_0.cairo +++ /dev/null @@ -1,122 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(6); - data.append(7); - data.append(2); - data.append(3); - data.append(7); - data.append(5); - data.append(6); - data.append(4); - data.append(8); - data.append(0); - data.append(1); - data.append(8); - data.append(18); - data.append(19); - data.append(14); - data.append(12); - data.append(16); - data.append(17); - data.append(15); - data.append(13); - data.append(17); - data.append(15); - data.append(13); - data.append(20); - data.append(24); - data.append(31); - data.append(26); - data.append(27); - data.append(28); - data.append(32); - data.append(27); - data.append(25); - data.append(32); - data.append(30); - data.append(28); - data.append(26); - data.append(36); - data.append(37); - data.append(41); - data.append(36); - data.append(40); - data.append(44); - data.append(42); - data.append(37); - data.append(41); - data.append(36); - data.append(43); - data.append(44); - data.append(48); - data.append(52); - data.append(53); - data.append(48); - data.append(55); - data.append(53); - data.append(48); - data.append(49); - data.append(56); - data.append(51); - data.append(55); - data.append(56); - data.append(63); - data.append(64); - data.append(62); - data.append(63); - data.append(61); - data.append(62); - data.append(60); - data.append(61); - data.append(68); - data.append(63); - data.append(61); - data.append(65); - data.append(75); - data.append(73); - data.append(80); - data.append(75); - data.append(79); - data.append(77); - data.append(75); - data.append(73); - data.append(77); - data.append(72); - data.append(76); - data.append(74); - data.append(84); - data.append(88); - data.append(92); - data.append(87); - data.append(91); - data.append(89); - data.append(84); - data.append(85); - data.append(89); - 
data.append(90); - data.append(91); - data.append(89); - data.append(96); - data.append(103); - data.append(98); - data.append(99); - data.append(100); - data.append(101); - data.append(102); - data.append(103); - data.append(98); - data.append(96); - data.append(103); - data.append(104); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis3.cairo b/tests/nodes/gather_elements_u32_axis3.cairo deleted file mode 100644 index 020d1f519..000000000 --- a/tests/nodes/gather_elements_u32_axis3.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_u32_axis3() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(3)); - - assert_eq(y, z); -} diff --git a/tests/nodes/gather_elements_u32_axis3/input_0.cairo b/tests/nodes/gather_elements_u32_axis3/input_0.cairo deleted file mode 100644 index 448e91a7e..000000000 --- a/tests/nodes/gather_elements_u32_axis3/input_0.cairo +++ /dev/null @@ -1,122 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - 
data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - data.append(36); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(43); - data.append(44); - data.append(45); - data.append(46); - data.append(47); - data.append(48); - data.append(49); - data.append(50); - data.append(51); - data.append(52); - data.append(53); - data.append(54); - data.append(55); - data.append(56); - data.append(57); - data.append(58); - data.append(59); - data.append(60); - data.append(61); - data.append(62); - data.append(63); - data.append(64); - data.append(65); - data.append(66); - data.append(67); - data.append(68); - data.append(69); - data.append(70); - data.append(71); - data.append(72); - data.append(73); - data.append(74); - data.append(75); - data.append(76); - data.append(77); - data.append(78); - data.append(79); - data.append(80); - data.append(81); - data.append(82); - data.append(83); - data.append(84); - data.append(85); - data.append(86); - data.append(87); - data.append(88); - data.append(89); - data.append(90); - data.append(91); - data.append(92); - data.append(93); - data.append(94); - data.append(95); - data.append(96); - data.append(97); - data.append(98); - data.append(99); - data.append(100); - data.append(101); - data.append(102); - data.append(103); - data.append(104); - data.append(105); - data.append(106); - data.append(107); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis3/input_1.cairo b/tests/nodes/gather_elements_u32_axis3/input_1.cairo deleted file mode 
100644 index 727fa793f..000000000 --- a/tests/nodes/gather_elements_u32_axis3/input_1.cairo +++ /dev/null @@ -1,230 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - data.append(0); - data.append(2); - 
data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - 
data.append(2); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_axis3/output_0.cairo b/tests/nodes/gather_elements_u32_axis3/output_0.cairo deleted file mode 100644 index eb4f17465..000000000 --- a/tests/nodes/gather_elements_u32_axis3/output_0.cairo +++ /dev/null @@ -1,230 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(6); - - let mut data = ArrayTrait::new(); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(4); - data.append(4); - data.append(3); - data.append(5); - data.append(3); - data.append(4); - data.append(6); - data.append(8); - data.append(7); - data.append(8); - data.append(8); - data.append(6); - data.append(10); - data.append(10); - data.append(9); - data.append(10); - data.append(11); - data.append(9); - data.append(12); - data.append(14); - data.append(14); - data.append(14); - data.append(14); - data.append(13); - data.append(16); - data.append(15); - data.append(17); - data.append(17); - data.append(17); - data.append(15); - data.append(18); - data.append(20); - data.append(18); - data.append(20); - data.append(18); - data.append(19); - data.append(22); - data.append(23); - data.append(21); - data.append(21); - data.append(22); - data.append(21); - data.append(24); - data.append(24); - data.append(25); - data.append(26); - data.append(26); - 
data.append(24); - data.append(28); - data.append(28); - data.append(28); - data.append(28); - data.append(29); - data.append(29); - data.append(30); - data.append(30); - data.append(30); - data.append(32); - data.append(32); - data.append(32); - data.append(35); - data.append(35); - data.append(33); - data.append(34); - data.append(35); - data.append(33); - data.append(37); - data.append(36); - data.append(38); - data.append(38); - data.append(38); - data.append(36); - data.append(41); - data.append(39); - data.append(39); - data.append(41); - data.append(39); - data.append(39); - data.append(44); - data.append(43); - data.append(43); - data.append(44); - data.append(42); - data.append(44); - data.append(46); - data.append(45); - data.append(47); - data.append(45); - data.append(46); - data.append(45); - data.append(50); - data.append(48); - data.append(50); - data.append(48); - data.append(50); - data.append(49); - data.append(51); - data.append(52); - data.append(52); - data.append(51); - data.append(53); - data.append(52); - data.append(55); - data.append(54); - data.append(55); - data.append(55); - data.append(55); - data.append(54); - data.append(57); - data.append(58); - data.append(59); - data.append(59); - data.append(58); - data.append(58); - data.append(60); - data.append(61); - data.append(61); - data.append(62); - data.append(61); - data.append(61); - data.append(63); - data.append(65); - data.append(65); - data.append(64); - data.append(65); - data.append(65); - data.append(67); - data.append(68); - data.append(67); - data.append(68); - data.append(67); - data.append(68); - data.append(69); - data.append(71); - data.append(71); - data.append(69); - data.append(70); - data.append(69); - data.append(73); - data.append(72); - data.append(73); - data.append(72); - data.append(72); - data.append(74); - data.append(77); - data.append(77); - data.append(77); - data.append(76); - data.append(76); - data.append(77); - data.append(79); - data.append(78); - 
data.append(78); - data.append(80); - data.append(79); - data.append(79); - data.append(83); - data.append(81); - data.append(81); - data.append(82); - data.append(82); - data.append(83); - data.append(85); - data.append(85); - data.append(86); - data.append(85); - data.append(85); - data.append(86); - data.append(89); - data.append(87); - data.append(88); - data.append(88); - data.append(87); - data.append(87); - data.append(90); - data.append(92); - data.append(92); - data.append(90); - data.append(90); - data.append(91); - data.append(93); - data.append(95); - data.append(94); - data.append(93); - data.append(95); - data.append(95); - data.append(97); - data.append(97); - data.append(97); - data.append(97); - data.append(98); - data.append(96); - data.append(99); - data.append(101); - data.append(101); - data.append(101); - data.append(100); - data.append(100); - data.append(102); - data.append(103); - data.append(103); - data.append(103); - data.append(104); - data.append(102); - data.append(105); - data.append(105); - data.append(106); - data.append(106); - data.append(105); - data.append(106); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_default.cairo b/tests/nodes/gather_elements_u32_default.cairo deleted file mode 100644 index 0674675d4..000000000 --- a/tests/nodes/gather_elements_u32_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod input_1; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_gather_elements_u32_default() { - let input_0 = input_0::input_0(); - let input_1 = input_1::input_1(); - let z = output_0::output_0(); - - let y = input_0.gather_elements(indices: input_1, axis: Option::Some(0)); - - assert_eq(y, z); -} 
diff --git a/tests/nodes/gather_elements_u32_default/input_0.cairo b/tests/nodes/gather_elements_u32_default/input_0.cairo deleted file mode 100644 index 448e91a7e..000000000 --- a/tests/nodes/gather_elements_u32_default/input_0.cairo +++ /dev/null @@ -1,122 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - data.append(3); - data.append(4); - data.append(5); - data.append(6); - data.append(7); - data.append(8); - data.append(9); - data.append(10); - data.append(11); - data.append(12); - data.append(13); - data.append(14); - data.append(15); - data.append(16); - data.append(17); - data.append(18); - data.append(19); - data.append(20); - data.append(21); - data.append(22); - data.append(23); - data.append(24); - data.append(25); - data.append(26); - data.append(27); - data.append(28); - data.append(29); - data.append(30); - data.append(31); - data.append(32); - data.append(33); - data.append(34); - data.append(35); - data.append(36); - data.append(37); - data.append(38); - data.append(39); - data.append(40); - data.append(41); - data.append(42); - data.append(43); - data.append(44); - data.append(45); - data.append(46); - data.append(47); - data.append(48); - data.append(49); - data.append(50); - data.append(51); - data.append(52); - data.append(53); - data.append(54); - data.append(55); - data.append(56); - data.append(57); - data.append(58); - data.append(59); - data.append(60); - data.append(61); - data.append(62); - data.append(63); - data.append(64); - data.append(65); - data.append(66); - data.append(67); - data.append(68); - data.append(69); - data.append(70); - data.append(71); - data.append(72); - data.append(73); 
- data.append(74); - data.append(75); - data.append(76); - data.append(77); - data.append(78); - data.append(79); - data.append(80); - data.append(81); - data.append(82); - data.append(83); - data.append(84); - data.append(85); - data.append(86); - data.append(87); - data.append(88); - data.append(89); - data.append(90); - data.append(91); - data.append(92); - data.append(93); - data.append(94); - data.append(95); - data.append(96); - data.append(97); - data.append(98); - data.append(99); - data.append(100); - data.append(101); - data.append(102); - data.append(103); - data.append(104); - data.append(105); - data.append(106); - data.append(107); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_default/input_1.cairo b/tests/nodes/gather_elements_u32_default/input_1.cairo deleted file mode 100644 index 75c7988d5..000000000 --- a/tests/nodes/gather_elements_u32_default/input_1.cairo +++ /dev/null @@ -1,374 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn input_1() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(10); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - 
data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - 
data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - 
data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(1); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(2); - data.append(2); - data.append(1); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - data.append(2); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(0); - data.append(2); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(1); - data.append(1); - data.append(1); - data.append(2); - data.append(0); - data.append(1); - data.append(2); - data.append(1); - data.append(2); - data.append(0); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - data.append(1); - data.append(2); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/gather_elements_u32_default/output_0.cairo b/tests/nodes/gather_elements_u32_default/output_0.cairo deleted file mode 100644 
index 4e6add915..000000000 --- a/tests/nodes/gather_elements_u32_default/output_0.cairo +++ /dev/null @@ -1,374 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorSub}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(10); - shape.append(3); - shape.append(4); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(36); - data.append(1); - data.append(2); - data.append(3); - data.append(76); - data.append(41); - data.append(42); - data.append(79); - data.append(44); - data.append(9); - data.append(46); - data.append(83); - data.append(12); - data.append(49); - data.append(14); - data.append(51); - data.append(88); - data.append(53); - data.append(54); - data.append(55); - data.append(20); - data.append(57); - data.append(58); - data.append(23); - data.append(96); - data.append(61); - data.append(62); - data.append(99); - data.append(64); - data.append(101); - data.append(102); - data.append(31); - data.append(68); - data.append(33); - data.append(106); - data.append(35); - data.append(36); - data.append(1); - data.append(38); - data.append(3); - data.append(4); - data.append(77); - data.append(78); - data.append(79); - data.append(8); - data.append(45); - data.append(10); - data.append(11); - data.append(12); - data.append(49); - data.append(86); - data.append(87); - data.append(52); - data.append(17); - data.append(90); - data.append(91); - data.append(92); - data.append(93); - data.append(22); - data.append(95); - data.append(60); - data.append(61); - data.append(62); - data.append(99); - data.append(28); - data.append(29); - data.append(66); - data.append(31); - data.append(68); - data.append(69); - data.append(70); - data.append(107); - data.append(72); - data.append(37); - data.append(38); - data.append(75); - data.append(4); - data.append(77); - data.append(42); - data.append(7); - data.append(44); - 
data.append(9); - data.append(10); - data.append(83); - data.append(48); - data.append(49); - data.append(14); - data.append(15); - data.append(88); - data.append(89); - data.append(90); - data.append(91); - data.append(92); - data.append(57); - data.append(58); - data.append(59); - data.append(96); - data.append(97); - data.append(98); - data.append(63); - data.append(28); - data.append(65); - data.append(66); - data.append(103); - data.append(104); - data.append(33); - data.append(34); - data.append(35); - data.append(36); - data.append(1); - data.append(38); - data.append(75); - data.append(40); - data.append(41); - data.append(42); - data.append(79); - data.append(44); - data.append(45); - data.append(82); - data.append(83); - data.append(12); - data.append(13); - data.append(50); - data.append(51); - data.append(16); - data.append(53); - data.append(18); - data.append(91); - data.append(56); - data.append(21); - data.append(58); - data.append(23); - data.append(60); - data.append(61); - data.append(62); - data.append(27); - data.append(28); - data.append(101); - data.append(102); - data.append(103); - data.append(104); - data.append(105); - data.append(106); - data.append(71); - data.append(36); - data.append(1); - data.append(2); - data.append(75); - data.append(4); - data.append(5); - data.append(78); - data.append(79); - data.append(80); - data.append(45); - data.append(82); - data.append(47); - data.append(12); - data.append(85); - data.append(86); - data.append(15); - data.append(88); - data.append(89); - data.append(18); - data.append(55); - data.append(20); - data.append(57); - data.append(22); - data.append(95); - data.append(96); - data.append(25); - data.append(62); - data.append(27); - data.append(64); - data.append(101); - data.append(30); - data.append(103); - data.append(32); - data.append(69); - data.append(34); - data.append(35); - data.append(72); - data.append(37); - data.append(74); - data.append(39); - data.append(40); - data.append(77); - 
data.append(6); - data.append(7); - data.append(8); - data.append(81); - data.append(82); - data.append(11); - data.append(84); - data.append(13); - data.append(50); - data.append(87); - data.append(88); - data.append(89); - data.append(54); - data.append(91); - data.append(20); - data.append(93); - data.append(94); - data.append(23); - data.append(24); - data.append(25); - data.append(98); - data.append(63); - data.append(28); - data.append(101); - data.append(30); - data.append(31); - data.append(32); - data.append(105); - data.append(70); - data.append(107); - data.append(36); - data.append(37); - data.append(74); - data.append(3); - data.append(40); - data.append(77); - data.append(42); - data.append(43); - data.append(8); - data.append(9); - data.append(10); - data.append(47); - data.append(84); - data.append(49); - data.append(86); - data.append(15); - data.append(52); - data.append(53); - data.append(54); - data.append(55); - data.append(20); - data.append(93); - data.append(94); - data.append(59); - data.append(24); - data.append(61); - data.append(98); - data.append(27); - data.append(100); - data.append(101); - data.append(66); - data.append(31); - data.append(104); - data.append(105); - data.append(70); - data.append(71); - data.append(0); - data.append(73); - data.append(2); - data.append(39); - data.append(4); - data.append(5); - data.append(42); - data.append(7); - data.append(8); - data.append(81); - data.append(46); - data.append(11); - data.append(84); - data.append(13); - data.append(86); - data.append(87); - data.append(16); - data.append(17); - data.append(54); - data.append(91); - data.append(92); - data.append(57); - data.append(94); - data.append(23); - data.append(96); - data.append(61); - data.append(98); - data.append(27); - data.append(100); - data.append(101); - data.append(66); - data.append(103); - data.append(104); - data.append(33); - data.append(106); - data.append(107); - data.append(72); - data.append(37); - data.append(2); - 
data.append(39); - data.append(40); - data.append(5); - data.append(78); - data.append(7); - data.append(44); - data.append(9); - data.append(82); - data.append(83); - data.append(12); - data.append(85); - data.append(50); - data.append(15); - data.append(16); - data.append(53); - data.append(18); - data.append(19); - data.append(92); - data.append(21); - data.append(58); - data.append(95); - data.append(60); - data.append(61); - data.append(62); - data.append(63); - data.append(64); - data.append(65); - data.append(30); - data.append(31); - data.append(32); - data.append(105); - data.append(34); - data.append(71); - data.append(0); - data.append(1); - data.append(38); - data.append(3); - data.append(76); - data.append(5); - data.append(78); - data.append(43); - data.append(80); - data.append(45); - data.append(46); - data.append(47); - data.append(48); - data.append(85); - data.append(50); - data.append(51); - data.append(52); - data.append(89); - data.append(18); - data.append(55); - data.append(92); - data.append(57); - data.append(94); - data.append(23); - data.append(24); - data.append(25); - data.append(62); - data.append(27); - data.append(28); - data.append(65); - data.append(66); - data.append(31); - data.append(32); - data.append(69); - data.append(106); - data.append(107); - TensorTrait::new(shape.span(), data.span()) -} From 7a56ab437d2ca657550449595a8837ac6ffa7507 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 23 Mar 2024 09:11:25 +0100 Subject: [PATCH 44/68] update doc --- docs/framework/operators/tensor/tensor.gather_elements.md | 8 ++++---- src/operators/tensor/core.cairo | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.gather_elements.md b/docs/framework/operators/tensor/tensor.gather_elements.md index 9bda94eb6..b490db030 100644 --- a/docs/framework/operators/tensor/tensor.gather_elements.md +++ b/docs/framework/operators/tensor/tensor.gather_elements.md @@ -1,7 +1,7 @@ # 
tensor.gather_elements ```rust - fn gather_elements(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; + fn gather_elements(self: @Tensor, indices: Tensor, axis: Option) -> Tensor; ``` GatherElements is an indexing operation that produces its output by indexing into the input data tensor at index positions determined by elements of the indices tensor. @@ -9,8 +9,8 @@ GatherElements is an indexing operation that produces its output by indexing int ## Args * `self`(`@Tensor`) - The input tensor. -* `indices`(`Tensor`) - Tensor of indices. -* `axis`(`Option`) - Axis to gather_elements on. Default: axis=0. +* `indices`(`Tensor`) - Tensor of indices. +* `axis`(`Option`) - Axis to gather_elements on. Default: axis=0. ## Panics @@ -32,7 +32,7 @@ fn gather_elements_example() -> Tensor { shape: array![3, 3].span(), data: array![[ 1, 2, 3],[4, 5, 6], [7, 8, 9]].span(), ); - let indices = TensorTrait::::new( + let indices = TensorTrait::::new( shape: array![1, 2, 0].span(), data: array![2, 0, 0].span(), ); diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 46780007a..2348bff86 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -4238,8 +4238,8 @@ trait TensorTrait { /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `indices`(`Tensor`) - Tensor of indices. - /// * `axis`(`Option`) - Axis to gather_elements on. Default: axis=0. + /// * `indices`(`Tensor`) - Tensor of indices. + /// * `axis`(`Option`) - Axis to gather_elements on. Default: axis=0. 
/// /// ## Panics /// @@ -4261,7 +4261,7 @@ trait TensorTrait { /// shape: array![3, 3].span(), /// data: array![[ 1, 2, 3],[4, 5, 6], [7, 8, 9]].span(), /// ); - /// let indices = TensorTrait::::new( + /// let indices = TensorTrait::::new( /// shape: array![1, 2, 0].span(), /// data: array![2, 0, 0].span(), /// ); From 5d9f4b1fe8c3eb5adf026af098525c3d2909510e Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sat, 23 Mar 2024 09:27:43 +0100 Subject: [PATCH 45/68] refactor operator --- nodegen/node/reduce_sum.py | 304 +++--------------- src/operators/nn/functional/logsoftmax.cairo | 6 +- src/operators/nn/functional/softmax.cairo | 6 +- src/operators/tensor/core.cairo | 7 +- .../tensor/implementations/tensor_bool.cairo | 21 +- .../implementations/tensor_complex64.cairo | 27 +- .../implementations/tensor_fp16x16.cairo | 9 +- .../implementations/tensor_fp16x16wide.cairo | 9 +- .../implementations/tensor_fp32x32.cairo | 9 +- .../implementations/tensor_fp64x64.cairo | 9 +- .../implementations/tensor_fp8x23.cairo | 27 +- .../implementations/tensor_fp8x23wide.cairo | 9 +- .../tensor/implementations/tensor_i32.cairo | 9 +- .../tensor/implementations/tensor_i8.cairo | 9 +- .../tensor/implementations/tensor_u32.cairo | 9 +- .../tensor/math/layer_normalization.cairo | 5 +- src/operators/tensor/math/reduce_l1.cairo | 2 +- src/operators/tensor/math/reduce_l2.cairo | 4 +- .../tensor/math/reduce_log_sum.cairo | 2 +- src/operators/tensor/math/reduce_sum.cairo | 143 +++++--- .../tensor/math/reduce_sum_square.cairo | 2 +- 21 files changed, 263 insertions(+), 365 deletions(-) diff --git a/nodegen/node/reduce_sum.py b/nodegen/node/reduce_sum.py index 111724001..029aac00a 100644 --- a/nodegen/node/reduce_sum.py +++ b/nodegen/node/reduce_sum.py @@ -4,285 +4,63 @@ class Reduce_sum(RunAll): - @staticmethod - def reduce_sum_u32(): - def reduce_sum_1D(): - x = np.array([0, 1, 2,]).astype(np.uint32) - y = np.array([3]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = 
Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_u32_1D" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def reduce_sum_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([2, 4]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_u32_2D_default" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([2, 4]).astype(np.uint32).reshape(1, 2) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_u32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.uint32).reshape(2, 2) - y = np.array([1, 5]).astype(np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "reduce_sum_u32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum(1, false)", name) - - default() - keepdims() - axis_1() - reduce_sum_1D() - reduce_sum_2D() @staticmethod - def reduce_sum_i32(): - def reduce_sum_1D(): - x = np.array([0, 1, 2,]).astype(np.int32) - y = np.array([3]).astype(np.int32) + def reduce_sum_no_keep_dims(): + axes = np.array([1], dtype=np.uint32) + keepdims = 0 - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) - name = "reduce_sum_i32_1D" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) - def reduce_sum_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([2, 4]).astype(np.int32) - - x 
= Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_i32_2D_default" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([2, 4]).astype(np.int32).reshape(1, 2) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_i32_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int32).reshape(2, 2) - y = np.array([1, 5]).astype(np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.I32, y.shape, y.flatten()) - - name = "reduce_sum_i32_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum(1, false)", name) - - default() - keepdims() - axis_1() - reduce_sum_1D() - reduce_sum_2D() + name = "reduce_sum_no_keep_dims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None)", name) @staticmethod - def reduce_sum_i8(): - def reduce_sum_1D(): - x = np.array([0, 1, 2,]).astype(np.int8) - y = np.array([3]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_i8_1D" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def reduce_sum_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([2, 4]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) + def reduce_sum_keep_dims(): + axes = np.array([1], dtype=np.uint32) + keepdims = 1 - name = "reduce_sum_i8_2D_default" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) - def keepdims(): - x = 
np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([2, 4]).astype(np.int8).reshape(1, 2) + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_i8_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int8).reshape(2, 2) - y = np.array([1, 5]).astype(np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.FP8x23, y.shape, y.flatten()) - - name = "reduce_sum_i8_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum(1, false)", name) - - default() - keepdims() - axis_1() - reduce_sum_1D() - reduce_sum_2D() + name = "reduce_sum_keep_dims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None)", name) @staticmethod - def reduce_sum_fp8x23(): - def reduce_sum_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([3]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_sum_fp8x23_1D" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def reduce_sum_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) - - name = "reduce_sum_fp8x23_2D_default" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), 
FixedImpl.FP8x23)) - - name = "reduce_sum_fp8x23_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum(0, true)", name) + def reduce_sum_default_axes_keepdims(): + keepdims = 1 - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([1, 5]).astype(np.int64) + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=None, keepdims=keepdims == 1) - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) - name = "reduce_sum_fp8x23_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum(1, false)", name) - - default() - keepdims() - axis_1() - - reduce_sum_1D() - reduce_sum_2D() + name = "reduce_sum_default_axes_keepdims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None)", name) @staticmethod - def reduce_sum_fp16x16(): - def reduce_sum_1D(): - x = np.array([0, 1, 2,]).astype(np.int64) - y = np.array([3]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_fp16x16_1D" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def reduce_sum_2D(): - def default(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_fp16x16_2D_default" - make_test( - [x], y, "input_0.reduce_sum(0, false)", name) - - def keepdims(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([2, 4]).astype(np.int64).reshape(1, 2) - - x = Tensor(Dtype.FP16x16, 
x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_fp16x16_2D_keepdims" - make_test( - [x], y, "input_0.reduce_sum(0, true)", name) - - def axis_1(): - x = np.array([0, 1, 2, 3]).astype(np.int64).reshape(2, 2) - y = np.array([1, 5]).astype(np.int64) - - x = Tensor(Dtype.FP16x16, x.shape, to_fp( - x.flatten(), FixedImpl.FP16x16)) - y = Tensor(Dtype.FP16x16, y.shape, to_fp( - y.flatten(), FixedImpl.FP16x16)) - - name = "reduce_sum_fp16x16_2D_axis_1" - make_test( - [x], y, "input_0.reduce_sum(1, false)", name) + def reduce_sum_empty_axes_input_noop(): + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.array(x) - default() - keepdims() - axis_1() + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) - reduce_sum_1D() - reduce_sum_2D() + name = "reduce_sum_empty_axes_input_noop" + make_test( + [x], y, "input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true))", name) \ No newline at end of file diff --git a/src/operators/nn/functional/logsoftmax.cairo b/src/operators/nn/functional/logsoftmax.cairo index fdf89c43d..690463149 100644 --- a/src/operators/nn/functional/logsoftmax.cairo +++ b/src/operators/nn/functional/logsoftmax.cairo @@ -10,7 +10,8 @@ fn logsoftmax< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor = z.exp(); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor + .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); let softmax = exp_tensor / sum; let logsoftmax = softmax.log(); @@ -38,7 +39,8 @@ fn logsoftmaxWide< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor + .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); let softmax = div_downcast(@exp_tensor, @sum); 
softmax.log() diff --git a/src/operators/nn/functional/softmax.cairo b/src/operators/nn/functional/softmax.cairo index 10602bde7..7cc7913b2 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -13,7 +13,8 @@ fn softmax< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor = z.exp(); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor + .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); exp_tensor / sum } @@ -39,7 +40,8 @@ fn softmaxWide< z: @Tensor, axis: usize ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); - let sum = exp_tensor.reduce_sum(axis, true); + let sum = exp_tensor + .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 2fe512bc7..d38ac3f29 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -681,7 +681,12 @@ trait TensorTrait { /// >>> [[4,6],[8,10]] /// ``` /// - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor; /// # tensor.argmax /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index be929b5b2..59021c896 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -64,7 +64,12 @@ impl BoolTensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { panic(array!['not supported!']) } @@ -570,17 +575,19 @@ impl BoolTryIntobool of TryInto { fn tensor_eq(mut lhs: Tensor, 
mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = lhs.data.pop_front().unwrap() == rhs.data.pop_front().unwrap(); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index a635bb84f..995376bc3 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -69,12 +69,19 @@ impl Complex64Tensor of TensorTrait { unravel_index(index, *self.shape) } - fn reshape(self: @Tensor, target_shape: Span, allowzero: bool) -> Tensor { + fn reshape( + self: @Tensor, target_shape: Span, allowzero: bool + ) -> Tensor { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -668,17 +675,19 @@ fn eq(lhs: @complex64, rhs: @complex64) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while 
lhs.data.len() != 0 && is_eq { - is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 846b9d73f..0b767f4f1 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -75,8 +75,13 @@ impl FP16x16Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index ed87491c3..0ba480372 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -79,8 +79,13 @@ impl FP16x16WTensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index f215f10f4..43da59d7d 100644 --- 
a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -72,8 +72,13 @@ impl FP32x32Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 9b8811486..e61f3e059 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -72,8 +72,13 @@ impl FP64x64Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 6e2e931e2..279b40ed8 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -72,8 +72,13 @@ impl FP8x23Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: 
Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { @@ -345,9 +350,7 @@ impl FP8x23Tensor of TensorTrait { core_ops::slice::(self, starts, ends, axes, steps) } - fn gather( - self: @Tensor, indices: Tensor, axis: Option - ) -> Tensor { + fn gather(self: @Tensor, indices: Tensor, axis: Option) -> Tensor { math::gather::gather(self, indices, axis) } @@ -777,17 +780,19 @@ fn relative_eq(lhs: @FP8x23, rhs: @FP8x23) -> bool { fn tensor_eq(mut lhs: Tensor, mut rhs: Tensor,) -> bool { let mut is_eq = true; - while lhs.shape.len() != 0 && is_eq { - is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); - }; + while lhs.shape.len() != 0 + && is_eq { + is_eq = lhs.shape.pop_front().unwrap() == rhs.shape.pop_front().unwrap(); + }; if !is_eq { return false; } - while lhs.data.len() != 0 && is_eq { - is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); - }; + while lhs.data.len() != 0 + && is_eq { + is_eq = relative_eq(lhs.data.pop_front().unwrap(), rhs.data.pop_front().unwrap()); + }; is_eq } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index dcc27247b..0090d0501 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -75,8 +75,13 @@ impl FP8x23WTensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git 
a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index d0c779e7b..d805437e3 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -72,8 +72,13 @@ impl I32Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index e05b05d81..026d8e750 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -70,8 +70,13 @@ impl I8Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 0723f9cc8..b5d1f9368 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -69,8 +69,13 @@ impl U32Tensor of TensorTrait { reshape(self, target_shape, allowzero) } - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { - math::reduce_sum::reduce_sum(self, axis, keepdims) + fn reduce_sum( + self: @Tensor, + axes: Option>, + keepdims: Option, + 
noop_with_empty_axes: Option + ) -> Tensor { + math::reduce_sum::reduce_sum(self, axes, keepdims, noop_with_empty_axes) } fn reduce_prod(self: @Tensor, axis: usize, keepdims: bool) -> Tensor { diff --git a/src/operators/tensor/math/layer_normalization.cairo b/src/operators/tensor/math/layer_normalization.cairo index 4adfdca91..b6aa33ec0 100644 --- a/src/operators/tensor/math/layer_normalization.cairo +++ b/src/operators/tensor/math/layer_normalization.cairo @@ -92,13 +92,14 @@ fn layer_normalization< one_tensor.append(NumberTrait::one()); let x_mat = self.reshape(shape_matrix.span(), false); - let x_mean = x_mat.reduce_sum(1, true) + let x_mean = x_mat + .reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::Some(false)) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); let x_diff = x_mat - x_mean; let x_squared_diff = x_diff * x_diff; - let variance = x_squared_diff.reduce_sum(1, true) + let variance = x_squared_diff.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::Some(false)) / TensorTrait::new(shape_one.span(), col_number_tensor.span()); let variance_eps = variance + TensorTrait::new(shape_one.span(), epsilon_tensor.span()); diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo index ba2be9215..9ccbd4613 100644 --- a/src/operators/tensor/math/reduce_l1.cairo +++ b/src/operators/tensor/math/reduce_l1.cairo @@ -16,5 +16,5 @@ fn reduce_l1< ) -> Tensor { let data_abs = self.abs(); - data_abs.reduce_sum(axis: axis, keepdims: keepdims) + data_abs.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)) } diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index 96f4b7245..56355942c 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -46,7 +46,7 @@ fn reduce_l2< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = 
square(self); - let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_square_sum = tensor_square.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); tensor_square_sum.sqrt() } @@ -64,7 +64,7 @@ fn reduce_l2_complex< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let mut tensor_square = square(@self.abs()); - let mut tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); + let mut tensor_square_sum = tensor_square.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); tensor_square_sum.sqrt() } diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo index 60a5225cb..7b64841ce 100644 --- a/src/operators/tensor/math/reduce_log_sum.cairo +++ b/src/operators/tensor/math/reduce_log_sum.cairo @@ -15,7 +15,7 @@ fn reduce_log_sum< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let tensor_square_sum = self.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_square_sum = self.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); let tensor_square_sum_log = tensor_square_sum.log(); tensor_square_sum_log diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index 078345f4a..509edb4bf 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,6 +1,12 @@ +use alexandria_sorting::bubble_sort; +use alexandria_data_structures::array_ext::{SpanTraitExt}; + +use orion::numbers::fixed_point::core::FixedTrait; use orion::numbers::NumberTrait; use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; -use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; +use orion::operators::tensor::helpers::{ + reduce_output_shape, len_from_shape, combine_indices, get_all_axes 
+}; /// Cf: TensorTrait::reduce_sum docstring fn reduce_sum< @@ -8,48 +14,98 @@ fn reduce_sum< MAG, impl TTensor: TensorTrait, impl TNumber: NumberTrait, - impl TAddEq: AddEq, impl TCopy: Copy, impl TDrop: Drop >( - self: @Tensor, axis: usize, keepdims: bool + self: @Tensor, + axes: Option>, + keepdims: Option, + noop_with_empty_axes: Option ) -> Tensor { - let mut output_data: Array = array![]; - - if (*self.shape).len() == 1 { - assert(axis == 0, 'axis out of dimensions'); - let current_sum = accumulate_sum::(*self.data, *self.shape, *self.shape, axis); - output_data.append(current_sum); + let noop_with_empty_axes = match noop_with_empty_axes { + Option::Some(noop_with_empty_axes) => noop_with_empty_axes, + Option::None => false, + }; + let axes = match axes { + Option::Some(axes) => { + if (axes.len() == 0) { + get_all_axes(*self.shape) + } else { + assert(axes.len() == axes.unique().len(), 'duplicated axis.'); + let mut axes_arr: Array = array![]; + let mut copy_axes = axes; + loop { + match copy_axes.pop_front() { + Option::Some(axis) => { axes_arr.append(*axis); }, + Option::None => { break; } + }; + }; + let sorted_axes = bubble_sort::bubble_sort_elements(axes_arr, true).span(); + sorted_axes + } + }, + Option::None => { + if noop_with_empty_axes { + return *self; + } + get_all_axes(*self.shape) + }, + }; + let keepdims = match keepdims { + Option::Some(keepdims) => keepdims, + Option::None => true, + }; - let mut output_shape: Array = array![]; - output_shape.append(1); + let mut axis_c = 0; + let mut copy_axes = axes; + let mut shape = *self.shape; + let mut data = *self.data; + loop { + match copy_axes.pop_front() { + Option::Some(axis) => { + if (shape.len() == 1) { + let current_sum = accumulate_sum::(data, shape, shape, 0); + shape = array![].span(); + data = array![current_sum].span(); + break (); + } + let mut temp_data = array![]; + let mut temp_shape = reduce_output_shape(shape, *axis - axis_c, false); + let data_len = len_from_shape(temp_shape); + 
let mut index: usize = 0; + while index != data_len { + let indices = unravel_index(index, temp_shape); + let current_sum = accumulate_sum::(data, shape, indices, *axis - axis_c); - return TensorTrait::new(output_shape.span(), output_data.span()); - } else { - assert(axis <= (*self.shape).len(), 'axis out of dimensions'); - let output_shape = reduce_output_shape(*self.shape, axis, false); - let output_data_len = len_from_shape(output_shape); - let mut index: usize = 0; - while index != output_data_len { - let output_indices = unravel_index(index, output_shape); - let current_sum = accumulate_sum::(*self.data, *self.shape, output_indices, axis); + temp_data.append(current_sum); - output_data.append(current_sum); + index += 1; + }; - index += 1; + shape = temp_shape; + data = temp_data.span(); + axis_c += 1; + }, + Option::None => { break; } }; + }; - if keepdims { - let output_shape = reduce_output_shape(*self.shape, axis, true); + let mut axes_copy = axes; + if keepdims { + shape = *self.shape; + loop { + match axes_copy.pop_front() { + Option::Some(axis) => { shape = reduce_output_shape(shape, *axis, true); }, + Option::None => { break; } + }; + }; - TensorTrait::::new(output_shape, output_data.span()) - } else { - TensorTrait::::new(output_shape, output_data.span()) - } + TensorTrait::::new(shape, data) + } else { + TensorTrait::::new(shape, data) } } - /// Helper function that accumulates the sum of elements along a specific axis. /// /// # Arguments @@ -62,42 +118,35 @@ fn reduce_sum< /// * Panics if gas limit is exceeded during execution. /// /// # Returns -/// * An i32 value representing the accumulated sum along the specified axis. +/// * A value representing the accumulated sum along the specified axis. 
fn accumulate_sum< - T, - MAG, - impl TNumber: NumberTrait, - impl TAddEq: AddEq, - impl TCopy: Copy, - impl TDrop: Drop + T, MAG, impl TNumber: NumberTrait, + impl TCopy: Copy, impl TDrop: Drop >( mut input_data: Span, input_shape: Span, output_indices: Span, axis: usize ) -> T { let axis_len = *(input_shape)[axis]; - let mut acc: T = NumberTrait::zero(); + let mut sum: T = NumberTrait::zero(); - let mut axis_index: usize = 0; + let mut axis_index = 0; if (input_shape).len() > 1 { - loop { - if axis_index == axis_len { - break (); - } - + while axis_index != axis_len { let input_indices = combine_indices(output_indices, axis_index, axis); let input_index = ravel_index(input_shape, input_indices); let ele = *(input_data)[input_index]; - acc += ele; + sum = NumberTrait::add(sum, ele); + axis_index += 1; }; } else { loop { match input_data.pop_front() { - Option::Some(item) => { acc += *item; }, + Option::Some(item) => sum = NumberTrait::add(sum, *item), Option::None => { break; } }; }; } - return acc; -} + sum +} \ No newline at end of file diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index b8ad7df99..c2f241625 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -45,7 +45,7 @@ fn reduce_sum_square< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = tensor_square.reduce_sum(axis: axis, keepdims: keepdims); + let tensor_square_sum = tensor_square.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); tensor_square_sum } From bf6580716da31b8457937386607513a4095149e9 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Sun, 24 Mar 2024 12:42:38 +0100 Subject: [PATCH 46/68] refactor operator --- src/operators/nn/functional/logsoftmax.cairo | 12 +++++++-- src/operators/nn/functional/softmax.cairo | 12 +++++++-- 
src/operators/tensor/core.cairo | 2 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/reduce_l1.cairo | 2 +- src/operators/tensor/math/reduce_l2.cairo | 14 ++++++++-- .../tensor/math/reduce_log_sum.cairo | 7 ++++- src/operators/tensor/math/reduce_sum.cairo | 27 +++++++++++++------ .../tensor/math/reduce_sum_square.cairo | 7 ++++- 19 files changed, 76 insertions(+), 29 deletions(-) diff --git a/src/operators/nn/functional/logsoftmax.cairo b/src/operators/nn/functional/logsoftmax.cairo index 690463149..82283844f 100644 --- a/src/operators/nn/functional/logsoftmax.cairo +++ b/src/operators/nn/functional/logsoftmax.cairo @@ -11,7 +11,11 @@ fn logsoftmax< ) -> Tensor { let exp_tensor = z.exp(); let sum = exp_tensor - .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(true), + Option::Some(false) + ); let softmax = exp_tensor / sum; let logsoftmax = softmax.log(); @@ -40,7 +44,11 @@ fn logsoftmaxWide< ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor - .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(true), + Option::Some(false) + ); let softmax = div_downcast(@exp_tensor, @sum); softmax.log() diff --git a/src/operators/nn/functional/softmax.cairo 
b/src/operators/nn/functional/softmax.cairo index 7cc7913b2..ba83438a4 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -14,7 +14,11 @@ fn softmax< ) -> Tensor { let exp_tensor = z.exp(); let sum = exp_tensor - .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(true), + Option::Some(false) + ); exp_tensor / sum } @@ -41,7 +45,11 @@ fn softmaxWide< ) -> Tensor { let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor - .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(true), + Option::Some(false) + ); div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index d38ac3f29..464dfa19e 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -683,7 +683,7 @@ trait TensorTrait { /// fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor; diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 59021c896..281350760 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -66,7 +66,7 @@ impl BoolTensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 995376bc3..dfe5a4237 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -77,7 +77,7 @@ impl 
Complex64Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index 0b767f4f1..a73990bcd 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -77,7 +77,7 @@ impl FP16x16Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 0ba480372..99e5dac30 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -81,7 +81,7 @@ impl FP16x16WTensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 43da59d7d..f5c1b4888 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -74,7 +74,7 @@ impl FP32x32Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index e61f3e059..23b096bc2 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -74,7 +74,7 @@ impl FP64x64Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: 
Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 279b40ed8..2b770b661 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -74,7 +74,7 @@ impl FP8x23Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 0090d0501..7e36b4e5a 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -77,7 +77,7 @@ impl FP8x23WTensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index d805437e3..e210becde 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -74,7 +74,7 @@ impl I32Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 026d8e750..dd66e1361 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -72,7 +72,7 @@ impl I8Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/implementations/tensor_u32.cairo 
b/src/operators/tensor/implementations/tensor_u32.cairo index b5d1f9368..463f4d27d 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -71,7 +71,7 @@ impl U32Tensor of TensorTrait { fn reduce_sum( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { diff --git a/src/operators/tensor/math/reduce_l1.cairo b/src/operators/tensor/math/reduce_l1.cairo index 9ccbd4613..29b83b69d 100644 --- a/src/operators/tensor/math/reduce_l1.cairo +++ b/src/operators/tensor/math/reduce_l1.cairo @@ -16,5 +16,5 @@ fn reduce_l1< ) -> Tensor { let data_abs = self.abs(); - data_abs.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)) + data_abs.reduce_sum(Option::Some(array![axis.try_into().unwrap()].span()), Option::Some(keepdims), Option::Some(false)) } diff --git a/src/operators/tensor/math/reduce_l2.cairo b/src/operators/tensor/math/reduce_l2.cairo index 56355942c..82f2d6e92 100644 --- a/src/operators/tensor/math/reduce_l2.cairo +++ b/src/operators/tensor/math/reduce_l2.cairo @@ -46,7 +46,12 @@ fn reduce_l2< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = tensor_square.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); + let tensor_square_sum = tensor_square + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(keepdims), + Option::Some(false) + ); tensor_square_sum.sqrt() } @@ -64,7 +69,12 @@ fn reduce_l2_complex< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let mut tensor_square = square(@self.abs()); - let mut tensor_square_sum = tensor_square.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); + let mut tensor_square_sum = tensor_square + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + 
Option::Some(keepdims), + Option::Some(false) + ); tensor_square_sum.sqrt() } diff --git a/src/operators/tensor/math/reduce_log_sum.cairo b/src/operators/tensor/math/reduce_log_sum.cairo index 7b64841ce..0cc60f9cb 100644 --- a/src/operators/tensor/math/reduce_log_sum.cairo +++ b/src/operators/tensor/math/reduce_log_sum.cairo @@ -15,7 +15,12 @@ fn reduce_log_sum< >( self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { - let tensor_square_sum = self.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); + let tensor_square_sum = self + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(keepdims), + Option::Some(false) + ); let tensor_square_sum_log = tensor_square_sum.log(); tensor_square_sum_log diff --git a/src/operators/tensor/math/reduce_sum.cairo b/src/operators/tensor/math/reduce_sum.cairo index 509edb4bf..ace480561 100644 --- a/src/operators/tensor/math/reduce_sum.cairo +++ b/src/operators/tensor/math/reduce_sum.cairo @@ -1,3 +1,5 @@ +use core::option::OptionTrait; +use core::traits::TryInto; use alexandria_sorting::bubble_sort; use alexandria_data_structures::array_ext::{SpanTraitExt}; @@ -18,7 +20,7 @@ fn reduce_sum< impl TDrop: Drop >( self: @Tensor, - axes: Option>, + axes: Option>, keepdims: Option, noop_with_empty_axes: Option ) -> Tensor { @@ -33,10 +35,20 @@ fn reduce_sum< } else { assert(axes.len() == axes.unique().len(), 'duplicated axis.'); let mut axes_arr: Array = array![]; - let mut copy_axes = axes; + let mut copy_axes = axes.clone(); loop { match copy_axes.pop_front() { - Option::Some(axis) => { axes_arr.append(*axis); }, + Option::Some(axis) => { + // Adjust negative axes to positive + let adjusted_axis = if *axis < 0 { + ((*self.shape).len().try_into().unwrap() + *axis) + .try_into() + .unwrap() + } else { + (*axis).try_into().unwrap() + }; + axes_arr.append(adjusted_axis); + }, Option::None => { break; } }; }; @@ -57,7 +69,7 @@ fn reduce_sum< }; let mut axis_c = 0; - 
let mut copy_axes = axes; + let mut copy_axes = axes.clone(); let mut shape = *self.shape; let mut data = *self.data; loop { @@ -90,7 +102,7 @@ fn reduce_sum< }; }; - let mut axes_copy = axes; + let mut axes_copy = axes.clone(); if keepdims { shape = *self.shape; loop { @@ -120,8 +132,7 @@ fn reduce_sum< /// # Returns /// * A value representing the accumulated sum along the specified axis. fn accumulate_sum< - T, MAG, impl TNumber: NumberTrait, - impl TCopy: Copy, impl TDrop: Drop + T, MAG, impl TNumber: NumberTrait, impl TCopy: Copy, impl TDrop: Drop >( mut input_data: Span, input_shape: Span, output_indices: Span, axis: usize ) -> T { @@ -149,4 +160,4 @@ fn accumulate_sum< } sum -} \ No newline at end of file +} diff --git a/src/operators/tensor/math/reduce_sum_square.cairo b/src/operators/tensor/math/reduce_sum_square.cairo index c2f241625..fc7789150 100644 --- a/src/operators/tensor/math/reduce_sum_square.cairo +++ b/src/operators/tensor/math/reduce_sum_square.cairo @@ -45,7 +45,12 @@ fn reduce_sum_square< self: @Tensor, axis: usize, keepdims: bool ) -> Tensor { let tensor_square = square(self); - let tensor_square_sum = tensor_square.reduce_sum(Option::Some(array![axis].span()), Option::Some(keepdims), Option::Some(false)); + let tensor_square_sum = tensor_square + .reduce_sum( + Option::Some(array![axis.try_into().unwrap()].span()), + Option::Some(keepdims), + Option::Some(false) + ); tensor_square_sum } From 75988dbdb7c9eb3aeef71fce1f5b77dab2ecd064 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:00:12 +0100 Subject: [PATCH 47/68] test operator --- nodegen/node/reduce_sum.py | 16 ++++++++++++ tests/nodes.cairo | 25 ++++-------------- .../reduce_sum_default_axes_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 16 +++++++++--- .../output_0.cairo | 9 ++++--- .../reduce_sum_empty_axes_input_noop.cairo | 20 ++++++++++++++ .../input_0.cairo | 16 +++++++++--- .../output_0.cairo | 26 +++++++++++++++++++ 
tests/nodes/reduce_sum_fp16x16_1D.cairo | 20 -------------- .../nodes/reduce_sum_fp16x16_1D/input_0.cairo | 15 ----------- .../reduce_sum_fp16x16_1D/output_0.cairo | 13 ---------- .../nodes/reduce_sum_fp16x16_2D_axis_1.cairo | 20 -------------- .../input_0.cairo | 17 ------------ .../output_0.cairo | 14 ---------- .../nodes/reduce_sum_fp16x16_2D_default.cairo | 20 -------------- .../input_0.cairo | 17 ------------ .../output_0.cairo | 14 ---------- .../reduce_sum_fp16x16_2D_keepdims.cairo | 20 -------------- .../input_0.cairo | 17 ------------ .../output_0.cairo | 15 ----------- tests/nodes/reduce_sum_fp8x23_1D.cairo | 20 -------------- .../nodes/reduce_sum_fp8x23_1D/input_0.cairo | 15 ----------- .../nodes/reduce_sum_fp8x23_1D/output_0.cairo | 13 ---------- tests/nodes/reduce_sum_fp8x23_2D_axis_1.cairo | 20 -------------- .../reduce_sum_fp8x23_2D_axis_1/input_0.cairo | 17 ------------ .../output_0.cairo | 14 ---------- .../nodes/reduce_sum_fp8x23_2D_default.cairo | 20 -------------- .../input_0.cairo | 17 ------------ .../output_0.cairo | 14 ---------- .../nodes/reduce_sum_fp8x23_2D_keepdims.cairo | 20 -------------- .../input_0.cairo | 17 ------------ tests/nodes/reduce_sum_i32_1D.cairo | 20 -------------- tests/nodes/reduce_sum_i32_1D/input_0.cairo | 14 ---------- tests/nodes/reduce_sum_i32_2D_axis_1.cairo | 20 -------------- .../reduce_sum_i32_2D_axis_1/output_0.cairo | 13 ---------- tests/nodes/reduce_sum_i32_2D_default.cairo | 20 -------------- tests/nodes/reduce_sum_i32_2D_keepdims.cairo | 20 -------------- tests/nodes/reduce_sum_i8_1D.cairo | 20 -------------- tests/nodes/reduce_sum_i8_1D/input_0.cairo | 15 ----------- tests/nodes/reduce_sum_i8_1D/output_0.cairo | 13 ---------- tests/nodes/reduce_sum_i8_2D_axis_1.cairo | 20 -------------- .../reduce_sum_i8_2D_axis_1/input_0.cairo | 17 ------------ .../reduce_sum_i8_2D_axis_1/output_0.cairo | 14 ---------- tests/nodes/reduce_sum_i8_2D_default.cairo | 20 -------------- 
.../reduce_sum_i8_2D_default/input_0.cairo | 17 ------------ .../reduce_sum_i8_2D_default/output_0.cairo | 14 ---------- tests/nodes/reduce_sum_i8_2D_keepdims.cairo | 20 -------------- .../reduce_sum_i8_2D_keepdims/input_0.cairo | 17 ------------ .../reduce_sum_i8_2D_keepdims/output_0.cairo | 15 ----------- tests/nodes/reduce_sum_keep_dims.cairo | 20 ++++++++++++++ .../input_0.cairo | 16 +++++++++--- .../output_0.cairo | 12 ++++++--- .../reduce_sum_negative_axes_keepdims.cairo | 20 ++++++++++++++ .../input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 15 +++++++---- tests/nodes/reduce_sum_no_keep_dims.cairo | 20 ++++++++++++++ .../reduce_sum_no_keep_dims/input_0.cairo | 26 +++++++++++++++++++ .../output_0.cairo | 12 ++++++--- 58 files changed, 272 insertions(+), 741 deletions(-) create mode 100644 tests/nodes/reduce_sum_default_axes_keepdims.cairo rename tests/nodes/{reduce_sum_i32_2D_axis_1 => reduce_sum_default_axes_keepdims}/input_0.cairo (50%) rename tests/nodes/{reduce_sum_i32_1D => reduce_sum_default_axes_keepdims}/output_0.cairo (57%) create mode 100644 tests/nodes/reduce_sum_empty_axes_input_noop.cairo rename tests/nodes/{reduce_sum_i32_2D_keepdims => reduce_sum_empty_axes_input_noop}/input_0.cairo (50%) create mode 100644 tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_1D.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_1D/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_axis_1/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_default/output_0.cairo delete mode 100644 
tests/nodes/reduce_sum_fp16x16_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp16x16_2D_keepdims/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_1D.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_1D/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_axis_1/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_default/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_fp8x23_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_i32_1D.cairo delete mode 100644 tests/nodes/reduce_sum_i32_1D/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_i32_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_i32_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_i32_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_i32_2D_keepdims.cairo delete mode 100644 tests/nodes/reduce_sum_i8_1D.cairo delete mode 100644 tests/nodes/reduce_sum_i8_1D/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_1D/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_axis_1.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_axis_1/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_axis_1/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_default.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_default/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_default/output_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_keepdims.cairo delete mode 
100644 tests/nodes/reduce_sum_i8_2D_keepdims/input_0.cairo delete mode 100644 tests/nodes/reduce_sum_i8_2D_keepdims/output_0.cairo create mode 100644 tests/nodes/reduce_sum_keep_dims.cairo rename tests/nodes/{reduce_sum_i32_2D_default => reduce_sum_keep_dims}/input_0.cairo (50%) rename tests/nodes/{reduce_sum_i32_2D_keepdims => reduce_sum_keep_dims}/output_0.cairo (54%) create mode 100644 tests/nodes/reduce_sum_negative_axes_keepdims.cairo create mode 100644 tests/nodes/reduce_sum_negative_axes_keepdims/input_0.cairo rename tests/nodes/{reduce_sum_fp8x23_2D_keepdims => reduce_sum_negative_axes_keepdims}/output_0.cairo (50%) create mode 100644 tests/nodes/reduce_sum_no_keep_dims.cairo create mode 100644 tests/nodes/reduce_sum_no_keep_dims/input_0.cairo rename tests/nodes/{reduce_sum_i32_2D_default => reduce_sum_no_keep_dims}/output_0.cairo (52%) diff --git a/nodegen/node/reduce_sum.py b/nodegen/node/reduce_sum.py index 029aac00a..d3f311b25 100644 --- a/nodegen/node/reduce_sum.py +++ b/nodegen/node/reduce_sum.py @@ -51,6 +51,22 @@ def reduce_sum_default_axes_keepdims(): name = "reduce_sum_default_axes_keepdims" make_test( [x], y, "input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None)", name) + + @staticmethod + def reduce_sum_negative_axes_keepdims(): + axes = np.array([-2], dtype=np.int64) + keepdims = 1 + + x = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [ + [9, 10], [11, 12]]]).astype(np.uint32) + y = np.sum(x, axis=tuple(axes.tolist()), keepdims=keepdims == 1) + + x = Tensor(Dtype.U32, x.shape, x.flatten()) + y = Tensor(Dtype.U32, y.shape, y.flatten()) + + name = "reduce_sum_negative_axes_keepdims" + make_test( + [x], y, "input_0.reduce_sum(Option::Some(array![-2].span()), Option::Some(true), Option::None)", name) @staticmethod def reduce_sum_empty_axes_input_noop(): diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 5d48a9b62..290ba2665 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -278,26 +278,6 @@ // mod 
or_i8_broadcast; // mod or_u32; // mod or_u32_broadcast; -// mod reduce_sum_fp16x16_1D; -// mod reduce_sum_fp16x16_2D_default; -// mod reduce_sum_fp16x16_2D_keepdims; -// mod reduce_sum_fp16x16_2D_axis_1; -// mod reduce_sum_fp8x23_1D; -// mod reduce_sum_fp8x23_2D_default; -// mod reduce_sum_fp8x23_2D_keepdims; -// mod reduce_sum_fp8x23_2D_axis_1; -// mod reduce_sum_i32_1D; -// mod reduce_sum_i32_2D_default; -// mod reduce_sum_i32_2D_keepdims; -// mod reduce_sum_i32_2D_axis_1; -// mod reduce_sum_i8_1D; -// mod reduce_sum_i8_2D_default; -// mod reduce_sum_i8_2D_keepdims; -// mod reduce_sum_i8_2D_axis_1; -// mod reduce_sum_u32_1D; -// mod reduce_sum_u32_2D_default; -// mod reduce_sum_u32_2D_keepdims; -// mod reduce_sum_u32_2D_axis_1; // mod relu_fp16x16; // mod relu_fp8x23; // mod relu_i32; @@ -1046,3 +1026,8 @@ mod reshape_reordered_all_dims; mod reshape_reordered_last_dims; mod reshape_zero_and_negative_dim; mod reshape_zero_dim; +mod reduce_sum_default_axes_keepdims; +mod reduce_sum_empty_axes_input_noop; +mod reduce_sum_keep_dims; +mod reduce_sum_negative_axes_keepdims; +mod reduce_sum_no_keep_dims; diff --git a/tests/nodes/reduce_sum_default_axes_keepdims.cairo b/tests/nodes/reduce_sum_default_axes_keepdims.cairo new file mode 100644 index 000000000..93bb951e3 --- /dev/null +++ b/tests/nodes/reduce_sum_default_axes_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_default_axes_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![].span()), Option::Some(true), Option::None); + + assert_eq(y_0, z_0); +} diff --git 
a/tests/nodes/reduce_sum_i32_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo similarity index 50% rename from tests/nodes/reduce_sum_i32_2D_axis_1/input_0.cairo rename to tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo index bb508695d..2de5818c3 100644 --- a/tests/nodes/reduce_sum_i32_2D_axis_1/input_0.cairo +++ b/tests/nodes/reduce_sum_default_axes_keepdims/input_0.cairo @@ -1,16 +1,26 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); + shape.append(3); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(2); data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_i32_1D/output_0.cairo b/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo similarity index 57% rename from tests/nodes/reduce_sum_i32_1D/output_0.cairo rename to tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo index 286549beb..6cc93d6f7 100644 --- a/tests/nodes/reduce_sum_i32_1D/output_0.cairo +++ b/tests/nodes/reduce_sum_default_axes_keepdims/output_0.cairo @@ -1,12 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(1); + shape.append(1); + shape.append(1); let mut data = 
ArrayTrait::new(); - data.append(3); + data.append(78); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop.cairo new file mode 100644 index 000000000..94c924e6f --- /dev/null +++ b/tests/nodes/reduce_sum_empty_axes_input_noop.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_empty_axes_input_noop() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::None, Option::Some(true), Option::Some(true)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_i32_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo similarity index 50% rename from tests/nodes/reduce_sum_i32_2D_keepdims/input_0.cairo rename to tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo index bb508695d..2de5818c3 100644 --- a/tests/nodes/reduce_sum_i32_2D_keepdims/input_0.cairo +++ b/tests/nodes/reduce_sum_empty_axes_input_noop/input_0.cairo @@ -1,16 +1,26 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); + shape.append(3); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(2); data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + 
data.append(11); + data.append(12); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo b/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo new file mode 100644 index 000000000..d679605a0 --- /dev/null +++ b/tests/nodes/reduce_sum_empty_axes_input_noop/output_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_fp16x16_1D.cairo b/tests/nodes/reduce_sum_fp16x16_1D.cairo deleted file mode 100644 index 3f0522443..000000000 --- a/tests/nodes/reduce_sum_fp16x16_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp16x16_1D() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp16x16_1D/input_0.cairo b/tests/nodes/reduce_sum_fp16x16_1D/input_0.cairo deleted file mode 100644 index 38c052c06..000000000 --- a/tests/nodes/reduce_sum_fp16x16_1D/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_1D/output_0.cairo b/tests/nodes/reduce_sum_fp16x16_1D/output_0.cairo deleted file mode 100644 index a2e4a88f1..000000000 --- a/tests/nodes/reduce_sum_fp16x16_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_axis_1.cairo b/tests/nodes/reduce_sum_fp16x16_2D_axis_1.cairo deleted file mode 100644 index 6d0a4e86c..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp16x16_2D_axis_1() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(1, false); - - assert_eq(y, z); -} diff --git 
a/tests/nodes/reduce_sum_fp16x16_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_fp16x16_2D_axis_1/input_0.cairo deleted file mode 100644 index 00c61e821..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_axis_1/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_fp16x16_2D_axis_1/output_0.cairo deleted file mode 100644 index 2eb416252..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 327680, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_default.cairo b/tests/nodes/reduce_sum_fp16x16_2D_default.cairo deleted file mode 100644 index 62fc72403..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::utils::{assert_eq, 
assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp16x16_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_default/input_0.cairo b/tests/nodes/reduce_sum_fp16x16_2D_default/input_0.cairo deleted file mode 100644 index 00c61e821..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_default/output_0.cairo b/tests/nodes/reduce_sum_fp16x16_2D_default/output_0.cairo deleted file mode 100644 index bbd646932..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - 
TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_keepdims.cairo b/tests/nodes/reduce_sum_fp16x16_2D_keepdims.cairo deleted file mode 100644 index 7de0e4085..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp16x16_2D_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_fp16x16_2D_keepdims/input_0.cairo deleted file mode 100644 index 00c61e821..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 196608, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp16x16_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_fp16x16_2D_keepdims/output_0.cairo deleted file mode 100644 index 5d99577e2..000000000 --- a/tests/nodes/reduce_sum_fp16x16_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorMul}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 262144, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp8x23_1D.cairo b/tests/nodes/reduce_sum_fp8x23_1D.cairo deleted file mode 100644 index 1887e4cda..000000000 --- a/tests/nodes/reduce_sum_fp8x23_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp8x23_1D() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp8x23_1D/input_0.cairo b/tests/nodes/reduce_sum_fp8x23_1D/input_0.cairo deleted file mode 100644 index e050eac48..000000000 --- a/tests/nodes/reduce_sum_fp8x23_1D/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - TensorTrait::new(shape.span(), data.span()) 
-} diff --git a/tests/nodes/reduce_sum_fp8x23_1D/output_0.cairo b/tests/nodes/reduce_sum_fp8x23_1D/output_0.cairo deleted file mode 100644 index a82fe159c..000000000 --- a/tests/nodes/reduce_sum_fp8x23_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_axis_1.cairo b/tests/nodes/reduce_sum_fp8x23_2D_axis_1.cairo deleted file mode 100644 index f8e1e9bd5..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp8x23_2D_axis_1() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(1, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_fp8x23_2D_axis_1/input_0.cairo deleted file mode 100644 index 29035e7f4..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_axis_1/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - 
shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_fp8x23_2D_axis_1/output_0.cairo deleted file mode 100644 index 74d4ee8e5..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 41943040, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_default.cairo b/tests/nodes/reduce_sum_fp8x23_2D_default.cairo deleted file mode 100644 index 95ef60ba0..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp8x23_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_default/input_0.cairo b/tests/nodes/reduce_sum_fp8x23_2D_default/input_0.cairo deleted file mode 100644 index 29035e7f4..000000000 
--- a/tests/nodes/reduce_sum_fp8x23_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_default/output_0.cairo b/tests/nodes/reduce_sum_fp8x23_2D_default/output_0.cairo deleted file mode 100644 index 05c879685..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_keepdims.cairo b/tests/nodes/reduce_sum_fp8x23_2D_keepdims.cairo deleted file mode 100644 index b81947c90..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - 
-#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_fp8x23_2D_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_fp8x23_2D_keepdims/input_0.cairo deleted file mode 100644 index 29035e7f4..000000000 --- a/tests/nodes/reduce_sum_fp8x23_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i32_1D.cairo b/tests/nodes/reduce_sum_i32_1D.cairo deleted file mode 100644 index 7a31579df..000000000 --- a/tests/nodes/reduce_sum_i32_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i32_1D() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i32_1D/input_0.cairo b/tests/nodes/reduce_sum_i32_1D/input_0.cairo deleted file mode 100644 index 064a0d4e6..000000000 --- a/tests/nodes/reduce_sum_i32_1D/input_0.cairo 
+++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i32_2D_axis_1.cairo b/tests/nodes/reduce_sum_i32_2D_axis_1.cairo deleted file mode 100644 index b67a7691d..000000000 --- a/tests/nodes/reduce_sum_i32_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i32_2D_axis_1() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(1, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i32_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_i32_2D_axis_1/output_0.cairo deleted file mode 100644 index bb1a0e727..000000000 --- a/tests/nodes/reduce_sum_i32_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(5); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i32_2D_default.cairo b/tests/nodes/reduce_sum_i32_2D_default.cairo deleted file mode 100644 index a0428f46d..000000000 --- a/tests/nodes/reduce_sum_i32_2D_default.cairo +++ /dev/null @@ -1,20 
+0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i32_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i32_2D_keepdims.cairo b/tests/nodes/reduce_sum_i32_2D_keepdims.cairo deleted file mode 100644 index acac6e0f5..000000000 --- a/tests/nodes/reduce_sum_i32_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::utils::{assert_eq, assert_seq_eq}; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i32_2D_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i8_1D.cairo b/tests/nodes/reduce_sum_i8_1D.cairo deleted file mode 100644 index 0d8424333..000000000 --- a/tests/nodes/reduce_sum_i8_1D.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i8_1D() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git 
a/tests/nodes/reduce_sum_i8_1D/input_0.cairo b/tests/nodes/reduce_sum_i8_1D/input_0.cairo deleted file mode 100644 index 3d963b8f3..000000000 --- a/tests/nodes/reduce_sum_i8_1D/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_1D/output_0.cairo b/tests/nodes/reduce_sum_i8_1D/output_0.cairo deleted file mode 100644 index c2d0723f9..000000000 --- a/tests/nodes/reduce_sum_i8_1D/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_2D_axis_1.cairo b/tests/nodes/reduce_sum_i8_2D_axis_1.cairo deleted file mode 100644 index 1d4fb2700..000000000 --- a/tests/nodes/reduce_sum_i8_2D_axis_1.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn 
test_reduce_sum_i8_2D_axis_1() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(1, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i8_2D_axis_1/input_0.cairo b/tests/nodes/reduce_sum_i8_2D_axis_1/input_0.cairo deleted file mode 100644 index 4c38e1659..000000000 --- a/tests/nodes/reduce_sum_i8_2D_axis_1/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_2D_axis_1/output_0.cairo b/tests/nodes/reduce_sum_i8_2D_axis_1/output_0.cairo deleted file mode 100644 index a9a7cd3ae..000000000 --- a/tests/nodes/reduce_sum_i8_2D_axis_1/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 5, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_2D_default.cairo b/tests/nodes/reduce_sum_i8_2D_default.cairo deleted file mode 100644 index 4875dc77c..000000000 --- a/tests/nodes/reduce_sum_i8_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - 
-use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i8_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, false); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i8_2D_default/input_0.cairo b/tests/nodes/reduce_sum_i8_2D_default/input_0.cairo deleted file mode 100644 index 4c38e1659..000000000 --- a/tests/nodes/reduce_sum_i8_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_2D_default/output_0.cairo b/tests/nodes/reduce_sum_i8_2D_default/output_0.cairo deleted file mode 100644 index a517b5dca..000000000 --- a/tests/nodes/reduce_sum_i8_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 4, sign: false }); 
- TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_2D_keepdims.cairo b/tests/nodes/reduce_sum_i8_2D_keepdims.cairo deleted file mode 100644 index 98172e256..000000000 --- a/tests/nodes/reduce_sum_i8_2D_keepdims.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::FP8x23TensorPartialEq; -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_reduce_sum_i8_2D_keepdims() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.reduce_sum(0, true); - - assert_eq(y, z); -} diff --git a/tests/nodes/reduce_sum_i8_2D_keepdims/input_0.cairo b/tests/nodes/reduce_sum_i8_2D_keepdims/input_0.cairo deleted file mode 100644 index 4c38e1659..000000000 --- a/tests/nodes/reduce_sum_i8_2D_keepdims/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 1, sign: false }); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 3, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_i8_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_i8_2D_keepdims/output_0.cairo deleted file mode 100644 index ef122111b..000000000 --- a/tests/nodes/reduce_sum_i8_2D_keepdims/output_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; 
-use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 2, sign: false }); - data.append(FP8x23 { mag: 4, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/reduce_sum_keep_dims.cairo b/tests/nodes/reduce_sum_keep_dims.cairo new file mode 100644 index 000000000..19a37247a --- /dev/null +++ b/tests/nodes/reduce_sum_keep_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_keep_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(true), Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_i32_2D_default/input_0.cairo b/tests/nodes/reduce_sum_keep_dims/input_0.cairo similarity index 50% rename from tests/nodes/reduce_sum_i32_2D_default/input_0.cairo rename to tests/nodes/reduce_sum_keep_dims/input_0.cairo index bb508695d..2de5818c3 100644 --- a/tests/nodes/reduce_sum_i32_2D_default/input_0.cairo +++ b/tests/nodes/reduce_sum_keep_dims/input_0.cairo @@ -1,16 +1,26 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); + shape.append(3); shape.append(2); shape.append(2); let mut data = 
ArrayTrait::new(); - data.append(0); data.append(1); data.append(2); data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_i32_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_keep_dims/output_0.cairo similarity index 54% rename from tests/nodes/reduce_sum_i32_2D_keepdims/output_0.cairo rename to tests/nodes/reduce_sum_keep_dims/output_0.cairo index 704b7fd71..5326997d6 100644 --- a/tests/nodes/reduce_sum_i32_2D_keepdims/output_0.cairo +++ b/tests/nodes/reduce_sum_keep_dims/output_0.cairo @@ -1,14 +1,20 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); + shape.append(3); shape.append(1); shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); data.append(4); + data.append(6); + data.append(12); + data.append(14); + data.append(20); + data.append(22); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_negative_axes_keepdims.cairo b/tests/nodes/reduce_sum_negative_axes_keepdims.cairo new file mode 100644 index 000000000..4c271c091 --- /dev/null +++ b/tests/nodes/reduce_sum_negative_axes_keepdims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_negative_axes_keepdims() { + let input_0 = input_0::input_0(); + let z_0 = 
output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![-2].span()), Option::Some(true), Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_negative_axes_keepdims/input_0.cairo b/tests/nodes/reduce_sum_negative_axes_keepdims/input_0.cairo new file mode 100644 index 000000000..2de5818c3 --- /dev/null +++ b/tests/nodes/reduce_sum_negative_axes_keepdims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_fp8x23_2D_keepdims/output_0.cairo b/tests/nodes/reduce_sum_negative_axes_keepdims/output_0.cairo similarity index 50% rename from tests/nodes/reduce_sum_fp8x23_2D_keepdims/output_0.cairo rename to tests/nodes/reduce_sum_negative_axes_keepdims/output_0.cairo index 216765e7b..5326997d6 100644 --- a/tests/nodes/reduce_sum_fp8x23_2D_keepdims/output_0.cairo +++ b/tests/nodes/reduce_sum_negative_axes_keepdims/output_0.cairo @@ -1,15 +1,20 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorMul}; -use orion::numbers::{FixedTrait, FP8x23}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); + shape.append(3); shape.append(1); shape.append(2); let mut data = ArrayTrait::new(); - 
data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: false }); + data.append(4); + data.append(6); + data.append(12); + data.append(14); + data.append(20); + data.append(22); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/reduce_sum_no_keep_dims.cairo b/tests/nodes/reduce_sum_no_keep_dims.cairo new file mode 100644 index 000000000..78c1c0c66 --- /dev/null +++ b/tests/nodes/reduce_sum_no_keep_dims.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::U32TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_reduce_sum_no_keep_dims() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.reduce_sum(Option::Some(array![1].span()), Option::Some(false), Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo b/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo new file mode 100644 index 000000000..2de5818c3 --- /dev/null +++ b/tests/nodes/reduce_sum_no_keep_dims/input_0.cairo @@ -0,0 +1,26 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(1); + data.append(2); + data.append(3); + data.append(4); + data.append(5); + data.append(6); + data.append(7); + data.append(8); + data.append(9); + data.append(10); + data.append(11); + data.append(12); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/reduce_sum_i32_2D_default/output_0.cairo 
b/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo similarity index 52% rename from tests/nodes/reduce_sum_i32_2D_default/output_0.cairo rename to tests/nodes/reduce_sum_no_keep_dims/output_0.cairo index 925b8f6c2..72c71a185 100644 --- a/tests/nodes/reduce_sum_i32_2D_default/output_0.cairo +++ b/tests/nodes/reduce_sum_no_keep_dims/output_0.cairo @@ -1,13 +1,19 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorMul}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); + shape.append(3); shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); data.append(4); + data.append(6); + data.append(12); + data.append(14); + data.append(20); + data.append(22); TensorTrait::new(shape.span(), data.span()) } From f0612ab5b35cd9695322780a70b88f0a8b78267f Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:04:36 +0100 Subject: [PATCH 48/68] update doc --- .../operators/tensor/tensor.reduce_sum.md | 15 ++++++--------- src/operators/tensor/core.cairo | 15 ++++++--------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.reduce_sum.md b/docs/framework/operators/tensor/tensor.reduce_sum.md index 3aa77d2ce..be4ef4029 100644 --- a/docs/framework/operators/tensor/tensor.reduce_sum.md +++ b/docs/framework/operators/tensor/tensor.reduce_sum.md @@ -1,7 +1,7 @@ ## tensor.reduce_sum ```rust - fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + fn reduce_sum(self: @Tensor, axes: Option>, keepdims: Option, noop_with_empty_axes: Option) -> Tensor; ``` Reduces a tensor by summing its elements along a specified axis. @@ -9,16 +9,13 @@ Reduces a tensor by summing its elements along a specified axis. ## Args * `self`(`@Tensor`) - The input tensor. 
-* `axis`(`usize`) - The dimension to reduce. -* `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. - -## Panics - -* Panics if axis is not in the range of the input tensor's dimensions. +* `axes`(`Option>`) - Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. +* `keepdims`(`Option`) - Keep the reduced dimension or not, default 1 means keep reduced dimension. +* `noop_with_empty_axes`(`Option`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. ## Returns -A new `Tensor` instance with the specified axis reduced by summing its elements. +Reduced output tensor. ## Examples @@ -33,7 +30,7 @@ fn reduce_sum_example() -> Tensor { ); // We can call `reduce_sum` function as follows. - return tensor.reduce_sum(axis: 0, keepdims: false); + return tensor.reduce_sum(axes: Option::None, keepdims: false); } >>> [[4,6],[8,10]] ``` diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 464dfa19e..919471015 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -644,7 +644,7 @@ trait TensorTrait { /// ## tensor.reduce_sum /// /// ```rust - /// fn reduce_sum(self: @Tensor, axis: usize, keepdims: bool) -> Tensor; + /// fn reduce_sum(self: @Tensor, axes: Option>, keepdims: Option, noop_with_empty_axes: Option) -> Tensor; /// ``` /// /// Reduces a tensor by summing its elements along a specified axis. @@ -652,16 +652,13 @@ trait TensorTrait { /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `axis`(`usize`) - The dimension to reduce. - /// * `keepdims`(`bool`) - If true, retains reduced dimensions with length 1. 
- /// - /// ## Panics - /// - /// * Panics if axis is not in the range of the input tensor's dimensions. + /// * `axes`(`Option>`) - Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. + /// * `keepdims`(`Option`) - Keep the reduced dimension or not, default 1 means keep reduced dimension. + /// * `noop_with_empty_axes`(`Option`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor. /// /// ## Returns /// - /// A new `Tensor` instance with the specified axis reduced by summing its elements. + /// Reduced output tensor. /// /// ## Examples /// @@ -676,7 +673,7 @@ trait TensorTrait { /// ); /// /// // We can call `reduce_sum` function as follows. 
- /// return tensor.reduce_sum(axis: 0, keepdims: false); + /// return tensor.reduce_sum(axes: Option::None, keepdims: false); /// } /// >>> [[4,6],[8,10]] /// ``` From ca3b413360ca5bcaee890d198029ff41e566020e Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:17:47 +0100 Subject: [PATCH 49/68] refactor operator --- .../ml/linear/linear_classifier.cairo | 2 +- .../ml/linear/linear_regressor.cairo | 2 +- src/operators/ml/svm/svm_classifier.cairo | 4 +-- src/operators/ml/svm/svm_regressor.cairo | 2 +- src/operators/nn/core.cairo | 2 +- src/operators/nn/functional/softmax.cairo | 26 ++++++++++--------- .../nn/implementations/nn_fp16x16.cairo | 2 +- .../nn/implementations/nn_fp32x32.cairo | 2 +- .../nn/implementations/nn_fp64x64.cairo | 2 +- .../nn/implementations/nn_fp8x23.cairo | 2 +- src/operators/nn/implementations/nn_i32.cairo | 2 +- src/operators/nn/implementations/nn_i8.cairo | 2 +- src/operators/nn/implementations/nn_u32.cairo | 2 +- 13 files changed, 27 insertions(+), 25 deletions(-) diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index 000179e36..18cbd6988 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -185,7 +185,7 @@ impl LinearClassifierImpl< // Post Transform scores = match classifier.post_transform { POST_TRANSFORM::NONE => { scores }, - POST_TRANSFORM::SOFTMAX => { NNTrait::softmax(@scores, 1) }, + POST_TRANSFORM::SOFTMAX => { NNTrait::softmax(@scores, Option::Some(1)) }, POST_TRANSFORM::LOGISTIC => { NNTrait::sigmoid(@scores) }, POST_TRANSFORM::SOFTMAXZERO => { NNTrait::softmax_zero(@scores, 1) }, POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), diff --git a/src/operators/ml/linear/linear_regressor.cairo b/src/operators/ml/linear/linear_regressor.cairo index d15f55f89..cff0eeacb 100644 --- a/src/operators/ml/linear/linear_regressor.cairo +++ 
b/src/operators/ml/linear/linear_regressor.cairo @@ -206,7 +206,7 @@ impl LinearRegressorImpl< // Post Transform let score = match regressor.post_transform { POST_TRANSFORM::NONE => score, // No action required - POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, 1), + POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, Option::Some(1)), POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@score), POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@score, 1), POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), diff --git a/src/operators/ml/svm/svm_classifier.cairo b/src/operators/ml/svm/svm_classifier.cairo index 8dacddfc0..4df3d63f6 100644 --- a/src/operators/ml/svm/svm_classifier.cairo +++ b/src/operators/ml/svm/svm_classifier.cairo @@ -680,7 +680,7 @@ fn write_scores< let new_scores = if n_classes >= 2 { let new_scores = match post_transform { POST_TRANSFORM::NONE => scores, - POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@scores, 0), + POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@scores, Option::Some(0)), POST_TRANSFORM::LOGISTIC => NNTrait::sigmoid(@scores), POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@scores, 0), POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), @@ -716,7 +716,7 @@ fn write_scores< @TensorTrait::new( array![2].span(), array![-*scores.data.at(0), *scores.data.at(0)].span() ), - 0 + Option::Some(0) ) } else { TensorTrait::new(array![1].span(), array![*scores.data.at(0)].span()) diff --git a/src/operators/ml/svm/svm_regressor.cairo b/src/operators/ml/svm/svm_regressor.cairo index d69e40d80..1d5858a2f 100644 --- a/src/operators/ml/svm/svm_regressor.cairo +++ b/src/operators/ml/svm/svm_regressor.cairo @@ -229,7 +229,7 @@ impl SVMRegressorImpl< score = match self.post_transform { POST_TRANSFORM::NONE => score, - POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, 1), + POST_TRANSFORM::SOFTMAX => NNTrait::softmax(@score, Option::Some(1)), POST_TRANSFORM::LOGISTIC => 
NNTrait::sigmoid(@score), POST_TRANSFORM::SOFTMAXZERO => NNTrait::softmax_zero(@score, 1), POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index 35d318b28..cc2f5c77c 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -112,7 +112,7 @@ trait NNTrait { /// // [[0.2689, 0.7311],[0.2689, 0.7311]] /// ``` /// - fn softmax(tensor: @Tensor, axis: usize) -> Tensor; + fn softmax(tensor: @Tensor, axis: Option) -> Tensor; /// # NNTrait::softmax_zero /// /// ```rust diff --git a/src/operators/nn/functional/softmax.cairo b/src/operators/nn/functional/softmax.cairo index ba83438a4..108b1292f 100644 --- a/src/operators/nn/functional/softmax.cairo +++ b/src/operators/nn/functional/softmax.cairo @@ -10,15 +10,16 @@ fn softmax< impl TCopy: Copy, impl TDrop: Drop, >( - z: @Tensor, axis: usize + z: @Tensor, axis: Option ) -> Tensor { + let axis = match axis { + Option::Some(val) => val, + Option::None => -1 + }; + let exp_tensor = z.exp(); let sum = exp_tensor - .reduce_sum( - Option::Some(array![axis.try_into().unwrap()].span()), - Option::Some(true), - Option::Some(false) - ); + .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); exp_tensor / sum } @@ -41,15 +42,16 @@ fn softmaxWide< impl TFixed: FixedTrait, impl WFixed: FixedTrait, >( - z: @Tensor, axis: usize + z: @Tensor, axis: Option ) -> Tensor { + let axis = match axis { + Option::Some(val) => val, + Option::None => -1 + }; + let exp_tensor: Tensor = exp_upcast(*z); let sum = exp_tensor - .reduce_sum( - Option::Some(array![axis.try_into().unwrap()].span()), - Option::Some(true), - Option::Some(false) - ); + .reduce_sum(Option::Some(array![axis].span()), Option::Some(true), Option::Some(false)); div_downcast(@exp_tensor, @sum) } diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 
1c018ade3..cd65d0fd0 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -21,7 +21,7 @@ impl FP16x16NN of NNTrait { functional::sigmoid::sigmoid(*tensor) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { functional::softmax::softmaxWide::(tensor, axis) } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index a5725eccb..4baa425b7 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -15,7 +15,7 @@ impl FP32x32NN of NNTrait { functional::sigmoid::sigmoid(*tensor) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { functional::softmax::softmax(tensor, axis) } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index 01a3b30ad..588387700 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -15,7 +15,7 @@ impl FP64x64NN of NNTrait { functional::sigmoid::sigmoid(*tensor) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { functional::softmax::softmax(tensor, axis) } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index d80d2c323..bb8d73436 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -19,7 +19,7 @@ impl FP8x23NN of NNTrait { functional::sigmoid::sigmoid(*tensor) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { functional::softmax::softmaxWide::(tensor, axis) } diff --git a/src/operators/nn/implementations/nn_i32.cairo 
b/src/operators/nn/implementations/nn_i32.cairo index 29a94d288..49200fe0b 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -12,7 +12,7 @@ impl I32NN of NNTrait { panic(array!['not supported!']) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index e22de6b43..f481fdc80 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -12,7 +12,7 @@ impl I8NN of NNTrait { panic(array!['not supported!']) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 7352b7ad9..ec0d6dedc 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -12,7 +12,7 @@ impl U32NN of NNTrait { panic(array!['not supported!']) } - fn softmax(tensor: @Tensor, axis: usize) -> Tensor { + fn softmax(tensor: @Tensor, axis: Option) -> Tensor { panic(array!['not supported!']) } From 5b17608c0c6d78ab4e1ff68ecf52768f443c4ad6 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:26:12 +0100 Subject: [PATCH 50/68] test operator --- nodegen/node/softmax.py | 60 ++++++++++----- tests/nodes.cairo | 6 +- ...max_fp16x16.cairo => softmax_axis_0.cairo} | 14 ++-- tests/nodes/softmax_axis_0/input_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_axis_0/output_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_axis_1.cairo | 20 +++++ tests/nodes/softmax_axis_1/input_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_axis_1/output_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_axis_2.cairo | 20 +++++ 
tests/nodes/softmax_axis_2/input_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_axis_2/output_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_axis_minus_1.cairo | 20 +++++ .../nodes/softmax_axis_minus_1/input_0.cairo | 74 +++++++++++++++++++ .../nodes/softmax_axis_minus_1/output_0.cairo | 74 +++++++++++++++++++ tests/nodes/softmax_fp16x16/input_0.cairo | 17 ----- tests/nodes/softmax_fp16x16/output_0.cairo | 17 ----- tests/nodes/softmax_fp8x23.cairo | 20 ----- tests/nodes/softmax_fp8x23/input_0.cairo | 17 ----- tests/nodes/softmax_fp8x23/output_0.cairo | 17 ----- 19 files changed, 706 insertions(+), 114 deletions(-) rename tests/nodes/{softmax_fp16x16.cairo => softmax_axis_0.cairo} (68%) create mode 100644 tests/nodes/softmax_axis_0/input_0.cairo create mode 100644 tests/nodes/softmax_axis_0/output_0.cairo create mode 100644 tests/nodes/softmax_axis_1.cairo create mode 100644 tests/nodes/softmax_axis_1/input_0.cairo create mode 100644 tests/nodes/softmax_axis_1/output_0.cairo create mode 100644 tests/nodes/softmax_axis_2.cairo create mode 100644 tests/nodes/softmax_axis_2/input_0.cairo create mode 100644 tests/nodes/softmax_axis_2/output_0.cairo create mode 100644 tests/nodes/softmax_axis_minus_1.cairo create mode 100644 tests/nodes/softmax_axis_minus_1/input_0.cairo create mode 100644 tests/nodes/softmax_axis_minus_1/output_0.cairo delete mode 100644 tests/nodes/softmax_fp16x16/input_0.cairo delete mode 100644 tests/nodes/softmax_fp16x16/output_0.cairo delete mode 100644 tests/nodes/softmax_fp8x23.cairo delete mode 100644 tests/nodes/softmax_fp8x23/input_0.cairo delete mode 100644 tests/nodes/softmax_fp8x23/output_0.cairo diff --git a/nodegen/node/softmax.py b/nodegen/node/softmax.py index 9e5ee99c1..6b59fae9b 100644 --- a/nodegen/node/softmax.py +++ b/nodegen/node/softmax.py @@ -11,33 +11,59 @@ def softmax(x: np.ndarray, axis: int = -1) -> np.ndarray: class Softmax(RunAll): + + @staticmethod + def axis_0(): + x = np.abs(np.random.randn(3, 4, 
5).astype(np.float32)) + y = softmax(x, axis=0) + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + name = "softmax_axis_0" + make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(0))", + name, Trait.NN) + @staticmethod - def fp8x23(): - x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64) - y = softmax(x, 0) - - x = Tensor(Dtype.FP8x23, x.shape, to_fp( - x.flatten(), FixedImpl.FP8x23)) - y = Tensor(Dtype.FP8x23, y.shape, to_fp( - y.flatten(), FixedImpl.FP8x23)) + def axis_1(): + x = np.abs(np.random.randn(3, 4, 5).astype(np.float32)) + y = softmax(x, axis=1) - name = "softmax_fp8x23" - make_test([x], y, "NNTrait::softmax(@input_0, 0)", - name, Trait.NN) + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + name = "softmax_axis_1" + make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(1))", + name, Trait.NN) + @staticmethod - def fp16x16(): - x = np.random.uniform(-3, 3, (2, 2)).astype(np.float64) - y = softmax(x, 1) + def axis_2(): + x = np.abs(np.random.randn(3, 4, 5).astype(np.float32)) + y = softmax(x, axis=2) x = Tensor(Dtype.FP16x16, x.shape, to_fp( x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - name = "softmax_fp16x16" - make_test([x], y, "NNTrait::softmax(@input_0, 1)", - name, Trait.NN) + name = "softmax_axis_2" + make_test([x], y, "NNTrait::softmax(@input_0, Option::Some(2))", + name, Trait.NN) + + @staticmethod + def axis_minus_1(): + x = np.abs(np.random.randn(3, 4, 5).astype(np.float32)) + y = softmax(x, axis=-1) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp( + x.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp( + y.flatten(), FixedImpl.FP16x16)) + name = "softmax_axis_minus_1" + make_test([x], y, "NNTrait::softmax(@input_0, Option::None)", + 
name, Trait.NN) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index c43ec76d3..19b1a7df9 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -288,8 +288,6 @@ // mod sin_fp8x23; // mod sinh_fp16x16; // mod sinh_fp8x23; -// mod softmax_fp16x16; -// mod softmax_fp8x23; // mod softplus_fp8x23; // mod softplus_fp16x16; // mod softsign_fp8x23; @@ -1020,3 +1018,7 @@ mod gather_elements_default; mod gather_elements_axis1; mod gather_elements_axis2; mod gather_elements_negative_indices; +mod softmax_axis_0; +mod softmax_axis_1; +mod softmax_axis_2; +mod softmax_axis_minus_1; diff --git a/tests/nodes/softmax_fp16x16.cairo b/tests/nodes/softmax_axis_0.cairo similarity index 68% rename from tests/nodes/softmax_fp16x16.cairo rename to tests/nodes/softmax_axis_0.cairo index 84fe1cf3d..135107f47 100644 --- a/tests/nodes/softmax_fp16x16.cairo +++ b/tests/nodes/softmax_axis_0.cairo @@ -2,19 +2,19 @@ mod input_0; mod output_0; -use orion::operators::nn::NNTrait; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::nn::FP16x16NN; +use orion::operators::nn::NNTrait; use orion::numbers::FixedTrait; -use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_softmax_fp16x16() { +fn test_softmax_axis_0() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = NNTrait::softmax(@input_0, 1); + let y_0 = NNTrait::softmax(@input_0, Option::Some(0)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/softmax_axis_0/input_0.cairo b/tests/nodes/softmax_axis_0/input_0.cairo new file mode 100644 index 000000000..b67797ca4 --- /dev/null +++ b/tests/nodes/softmax_axis_0/input_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 77748, sign: false }); + data.append(FP16x16 { mag: 20181, sign: false }); + data.append(FP16x16 { mag: 66586, sign: false }); + data.append(FP16x16 { mag: 39564, sign: false }); + data.append(FP16x16 { mag: 55469, sign: false }); + data.append(FP16x16 { mag: 15765, sign: false }); + data.append(FP16x16 { mag: 31745, sign: false }); + data.append(FP16x16 { mag: 64291, sign: false }); + data.append(FP16x16 { mag: 64704, sign: false }); + data.append(FP16x16 { mag: 95806, sign: false }); + data.append(FP16x16 { mag: 42434, sign: false }); + data.append(FP16x16 { mag: 107711, sign: false }); + data.append(FP16x16 { mag: 63051, sign: false }); + data.append(FP16x16 { mag: 93445, sign: false }); + data.append(FP16x16 { mag: 241, sign: false }); + data.append(FP16x16 { mag: 131759, sign: false }); + data.append(FP16x16 { mag: 74671, sign: false }); + data.append(FP16x16 { mag: 44973, sign: false }); + data.append(FP16x16 { mag: 92338, sign: false }); + data.append(FP16x16 { mag: 36204, sign: false }); + data.append(FP16x16 { mag: 12200, sign: false }); + data.append(FP16x16 { mag: 73821, sign: false }); + data.append(FP16x16 { mag: 13038, sign: false }); + data.append(FP16x16 { mag: 21598, sign: false }); + data.append(FP16x16 { mag: 75353, sign: false }); + data.append(FP16x16 { mag: 41470, sign: false }); + data.append(FP16x16 { mag: 11370, sign: false }); + data.append(FP16x16 { mag: 62793, sign: false }); + data.append(FP16x16 { mag: 19117, sign: false }); + data.append(FP16x16 { mag: 95800, sign: false }); + data.append(FP16x16 { mag: 40696, sign: false }); + data.append(FP16x16 { mag: 95240, sign: false }); + data.append(FP16x16 { mag: 103492, sign: false }); + data.append(FP16x16 { mag: 36412, sign: false }); + 
data.append(FP16x16 { mag: 22269, sign: false }); + data.append(FP16x16 { mag: 201968, sign: false }); + data.append(FP16x16 { mag: 40874, sign: false }); + data.append(FP16x16 { mag: 14038, sign: false }); + data.append(FP16x16 { mag: 55733, sign: false }); + data.append(FP16x16 { mag: 65120, sign: false }); + data.append(FP16x16 { mag: 128415, sign: false }); + data.append(FP16x16 { mag: 86247, sign: false }); + data.append(FP16x16 { mag: 47611, sign: false }); + data.append(FP16x16 { mag: 34746, sign: false }); + data.append(FP16x16 { mag: 23589, sign: false }); + data.append(FP16x16 { mag: 51498, sign: false }); + data.append(FP16x16 { mag: 6664, sign: false }); + data.append(FP16x16 { mag: 32348, sign: false }); + data.append(FP16x16 { mag: 31728, sign: false }); + data.append(FP16x16 { mag: 43457, sign: false }); + data.append(FP16x16 { mag: 41874, sign: false }); + data.append(FP16x16 { mag: 17514, sign: false }); + data.append(FP16x16 { mag: 42083, sign: false }); + data.append(FP16x16 { mag: 30365, sign: false }); + data.append(FP16x16 { mag: 133274, sign: false }); + data.append(FP16x16 { mag: 54633, sign: false }); + data.append(FP16x16 { mag: 168600, sign: false }); + data.append(FP16x16 { mag: 15559, sign: false }); + data.append(FP16x16 { mag: 50448, sign: false }); + data.append(FP16x16 { mag: 70775, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_0/output_0.cairo b/tests/nodes/softmax_axis_0/output_0.cairo new file mode 100644 index 000000000..662d59d5b --- /dev/null +++ b/tests/nodes/softmax_axis_0/output_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + 
data.append(FP16x16 { mag: 18542, sign: false }); + data.append(FP16x16 { mag: 10909, sign: false }); + data.append(FP16x16 { mag: 29920, sign: false }); + data.append(FP16x16 { mag: 24368, sign: false }); + data.append(FP16x16 { mag: 22071, sign: false }); + data.append(FP16x16 { mag: 15584, sign: false }); + data.append(FP16x16 { mag: 27139, sign: false }); + data.append(FP16x16 { mag: 25287, sign: false }); + data.append(FP16x16 { mag: 31157, sign: false }); + data.append(FP16x16 { mag: 26751, sign: false }); + data.append(FP16x16 { mag: 22100, sign: false }); + data.append(FP16x16 { mag: 31519, sign: false }); + data.append(FP16x16 { mag: 18307, sign: false }); + data.append(FP16x16 { mag: 36393, sign: false }); + data.append(FP16x16 { mag: 6545, sign: false }); + data.append(FP16x16 { mag: 15502, sign: false }); + data.append(FP16x16 { mag: 11319, sign: false }); + data.append(FP16x16 { mag: 28971, sign: false }); + data.append(FP16x16 { mag: 31211, sign: false }); + data.append(FP16x16 { mag: 15422, sign: false }); + data.append(FP16x16 { mag: 6820, sign: false }); + data.append(FP16x16 { mag: 24731, sign: false }); + data.append(FP16x16 { mag: 13216, sign: false }); + data.append(FP16x16 { mag: 18525, sign: false }); + data.append(FP16x16 { mag: 29894, sign: false }); + data.append(FP16x16 { mag: 23068, sign: false }); + data.append(FP16x16 { mag: 19887, sign: false }); + data.append(FP16x16 { mag: 24716, sign: false }); + data.append(FP16x16 { mag: 15540, sign: false }); + data.append(FP16x16 { mag: 26749, sign: false }); + data.append(FP16x16 { mag: 21522, sign: false }); + data.append(FP16x16 { mag: 26057, sign: false }); + data.append(FP16x16 { mag: 33933, sign: false }); + data.append(FP16x16 { mag: 15242, sign: false }); + data.append(FP16x16 { mag: 9159, sign: false }); + data.append(FP16x16 { mag: 45254, sign: false }); + data.append(FP16x16 { mag: 6759, sign: false }); + data.append(FP16x16 { mag: 18070, sign: false }); + data.append(FP16x16 { mag: 
17854, sign: false }); + data.append(FP16x16 { mag: 23976, sign: false }); + data.append(FP16x16 { mag: 40173, sign: false }); + data.append(FP16x16 { mag: 29894, sign: false }); + data.append(FP16x16 { mag: 22398, sign: false }); + data.append(FP16x16 { mag: 22641, sign: false }); + data.append(FP16x16 { mag: 13569, sign: false }); + data.append(FP16x16 { mag: 26883, sign: false }); + data.append(FP16x16 { mag: 18509, sign: false }); + data.append(FP16x16 { mag: 15532, sign: false }); + data.append(FP16x16 { mag: 18838, sign: false }); + data.append(FP16x16 { mag: 12034, sign: false }); + data.append(FP16x16 { mag: 21912, sign: false }); + data.append(FP16x16 { mag: 7959, sign: false }); + data.append(FP16x16 { mag: 13294, sign: false }); + data.append(FP16x16 { mag: 13899, sign: false }); + data.append(FP16x16 { mag: 49831, sign: false }); + data.append(FP16x16 { mag: 4778, sign: false }); + data.append(FP16x16 { mag: 47456, sign: false }); + data.append(FP16x16 { mag: 18494, sign: false }); + data.append(FP16x16 { mag: 16470, sign: false }); + data.append(FP16x16 { mag: 26136, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_1.cairo b/tests/nodes/softmax_axis_1.cairo new file mode 100644 index 000000000..74517830b --- /dev/null +++ b/tests/nodes/softmax_axis_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_softmax_axis_1() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::softmax(@input_0, Option::Some(1)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/softmax_axis_1/input_0.cairo b/tests/nodes/softmax_axis_1/input_0.cairo new file mode 100644 index 000000000..84f011140 --- /dev/null +++ 
b/tests/nodes/softmax_axis_1/input_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 55504, sign: false }); + data.append(FP16x16 { mag: 131012, sign: false }); + data.append(FP16x16 { mag: 66466, sign: false }); + data.append(FP16x16 { mag: 15137, sign: false }); + data.append(FP16x16 { mag: 184134, sign: false }); + data.append(FP16x16 { mag: 45919, sign: false }); + data.append(FP16x16 { mag: 61072, sign: false }); + data.append(FP16x16 { mag: 18808, sign: false }); + data.append(FP16x16 { mag: 10438, sign: false }); + data.append(FP16x16 { mag: 28335, sign: false }); + data.append(FP16x16 { mag: 19320, sign: false }); + data.append(FP16x16 { mag: 18945, sign: false }); + data.append(FP16x16 { mag: 51241, sign: false }); + data.append(FP16x16 { mag: 29903, sign: false }); + data.append(FP16x16 { mag: 9030, sign: false }); + data.append(FP16x16 { mag: 112806, sign: false }); + data.append(FP16x16 { mag: 28939, sign: false }); + data.append(FP16x16 { mag: 112572, sign: false }); + data.append(FP16x16 { mag: 89990, sign: false }); + data.append(FP16x16 { mag: 87594, sign: false }); + data.append(FP16x16 { mag: 56996, sign: false }); + data.append(FP16x16 { mag: 31238, sign: false }); + data.append(FP16x16 { mag: 66896, sign: false }); + data.append(FP16x16 { mag: 37962, sign: false }); + data.append(FP16x16 { mag: 26194, sign: false }); + data.append(FP16x16 { mag: 59208, sign: false }); + data.append(FP16x16 { mag: 6005, sign: false }); + data.append(FP16x16 { mag: 16581, sign: false }); + data.append(FP16x16 { mag: 27378, sign: false }); + data.append(FP16x16 { mag: 59336, sign: false }); + 
data.append(FP16x16 { mag: 11513, sign: false }); + data.append(FP16x16 { mag: 12294, sign: false }); + data.append(FP16x16 { mag: 4336, sign: false }); + data.append(FP16x16 { mag: 111725, sign: false }); + data.append(FP16x16 { mag: 45307, sign: false }); + data.append(FP16x16 { mag: 145057, sign: false }); + data.append(FP16x16 { mag: 44365, sign: false }); + data.append(FP16x16 { mag: 80274, sign: false }); + data.append(FP16x16 { mag: 50643, sign: false }); + data.append(FP16x16 { mag: 39432, sign: false }); + data.append(FP16x16 { mag: 53176, sign: false }); + data.append(FP16x16 { mag: 202691, sign: false }); + data.append(FP16x16 { mag: 54389, sign: false }); + data.append(FP16x16 { mag: 125453, sign: false }); + data.append(FP16x16 { mag: 101533, sign: false }); + data.append(FP16x16 { mag: 2658, sign: false }); + data.append(FP16x16 { mag: 31411, sign: false }); + data.append(FP16x16 { mag: 44406, sign: false }); + data.append(FP16x16 { mag: 82774, sign: false }); + data.append(FP16x16 { mag: 36316, sign: false }); + data.append(FP16x16 { mag: 37737, sign: false }); + data.append(FP16x16 { mag: 5076, sign: false }); + data.append(FP16x16 { mag: 48499, sign: false }); + data.append(FP16x16 { mag: 3099, sign: false }); + data.append(FP16x16 { mag: 168018, sign: false }); + data.append(FP16x16 { mag: 18863, sign: false }); + data.append(FP16x16 { mag: 16555, sign: false }); + data.append(FP16x16 { mag: 4096, sign: false }); + data.append(FP16x16 { mag: 227, sign: false }); + data.append(FP16x16 { mag: 35060, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_1/output_0.cairo b/tests/nodes/softmax_axis_1/output_0.cairo new file mode 100644 index 000000000..6421c06e1 --- /dev/null +++ b/tests/nodes/softmax_axis_1/output_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 13548, sign: false }); + data.append(FP16x16 { mag: 37762, sign: false }); + data.append(FP16x16 { mag: 15252, sign: false }); + data.append(FP16x16 { mag: 10374, sign: false }); + data.append(FP16x16 { mag: 47109, sign: false }); + data.append(FP16x16 { mag: 11705, sign: false }); + data.append(FP16x16 { mag: 12989, sign: false }); + data.append(FP16x16 { mag: 7370, sign: false }); + data.append(FP16x16 { mag: 9656, sign: false }); + data.append(FP16x16 { mag: 4371, sign: false }); + data.append(FP16x16 { mag: 7800, sign: false }); + data.append(FP16x16 { mag: 6829, sign: false }); + data.append(FP16x16 { mag: 12090, sign: false }); + data.append(FP16x16 { mag: 12996, sign: false }); + data.append(FP16x16 { mag: 3256, sign: false }); + data.append(FP16x16 { mag: 32481, sign: false }); + data.append(FP16x16 { mag: 7954, sign: false }); + data.append(FP16x16 { mag: 30822, sign: false }); + data.append(FP16x16 { mag: 32508, sign: false }); + data.append(FP16x16 { mag: 10798, sign: false }); + data.append(FP16x16 { mag: 10292, sign: false }); + data.append(FP16x16 { mag: 17949, sign: false }); + data.append(FP16x16 { mag: 21309, sign: false }); + data.append(FP16x16 { mag: 10662, sign: false }); + data.append(FP16x16 { mag: 12553, sign: false }); + data.append(FP16x16 { mag: 10646, sign: false }); + data.append(FP16x16 { mag: 12213, sign: false }); + data.append(FP16x16 { mag: 9888, sign: false }); + data.append(FP16x16 { mag: 9072, sign: false }); + data.append(FP16x16 { mag: 20815, sign: false }); + data.append(FP16x16 { mag: 5141, sign: false }); + data.append(FP16x16 { mag: 13443, sign: false }); + data.append(FP16x16 { mag: 8203, sign: false }); + data.append(FP16x16 { mag: 32861, sign: false }); + data.append(FP16x16 { mag: 16804, 
sign: false }); + data.append(FP16x16 { mag: 39454, sign: false }); + data.append(FP16x16 { mag: 21930, sign: false }); + data.append(FP16x16 { mag: 26134, sign: false }); + data.append(FP16x16 { mag: 12939, sign: false }); + data.append(FP16x16 { mag: 15363, sign: false }); + data.append(FP16x16 { mag: 23034, sign: false }); + data.append(FP16x16 { mag: 55505, sign: false }); + data.append(FP16x16 { mag: 20245, sign: false }); + data.append(FP16x16 { mag: 35930, sign: false }); + data.append(FP16x16 { mag: 14595, sign: false }); + data.append(FP16x16 { mag: 10656, sign: false }); + data.append(FP16x16 { mag: 4067, sign: false }); + data.append(FP16x16 { mag: 17385, sign: false }); + data.append(FP16x16 { mag: 18734, sign: false }); + data.append(FP16x16 { mag: 5395, sign: false }); + data.append(FP16x16 { mag: 18199, sign: false }); + data.append(FP16x16 { mag: 2721, sign: false }); + data.append(FP16x16 { mag: 18505, sign: false }); + data.append(FP16x16 { mag: 5554, sign: false }); + data.append(FP16x16 { mag: 40252, sign: false }); + data.append(FP16x16 { mag: 13645, sign: false }); + data.append(FP16x16 { mag: 3242, sign: false }); + data.append(FP16x16 { mag: 9398, sign: false }); + data.append(FP16x16 { mag: 5316, sign: false }); + data.append(FP16x16 { mag: 5293, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_2.cairo b/tests/nodes/softmax_axis_2.cairo new file mode 100644 index 000000000..87e8f6320 --- /dev/null +++ b/tests/nodes/softmax_axis_2.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_softmax_axis_2() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::softmax(@input_0, Option::Some(2)); + + 
assert_eq(y_0, z_0); +} diff --git a/tests/nodes/softmax_axis_2/input_0.cairo b/tests/nodes/softmax_axis_2/input_0.cairo new file mode 100644 index 000000000..4584fe371 --- /dev/null +++ b/tests/nodes/softmax_axis_2/input_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 58553, sign: false }); + data.append(FP16x16 { mag: 53519, sign: false }); + data.append(FP16x16 { mag: 57124, sign: false }); + data.append(FP16x16 { mag: 43576, sign: false }); + data.append(FP16x16 { mag: 41799, sign: false }); + data.append(FP16x16 { mag: 176139, sign: false }); + data.append(FP16x16 { mag: 41400, sign: false }); + data.append(FP16x16 { mag: 87597, sign: false }); + data.append(FP16x16 { mag: 15096, sign: false }); + data.append(FP16x16 { mag: 38563, sign: false }); + data.append(FP16x16 { mag: 52406, sign: false }); + data.append(FP16x16 { mag: 62539, sign: false }); + data.append(FP16x16 { mag: 54826, sign: false }); + data.append(FP16x16 { mag: 55549, sign: false }); + data.append(FP16x16 { mag: 14067, sign: false }); + data.append(FP16x16 { mag: 21619, sign: false }); + data.append(FP16x16 { mag: 40071, sign: false }); + data.append(FP16x16 { mag: 43510, sign: false }); + data.append(FP16x16 { mag: 39683, sign: false }); + data.append(FP16x16 { mag: 121524, sign: false }); + data.append(FP16x16 { mag: 82816, sign: false }); + data.append(FP16x16 { mag: 75559, sign: false }); + data.append(FP16x16 { mag: 66467, sign: false }); + data.append(FP16x16 { mag: 122834, sign: false }); + data.append(FP16x16 { mag: 88244, sign: false }); + data.append(FP16x16 { mag: 1460, sign: false }); + data.append(FP16x16 { mag: 
37949, sign: false }); + data.append(FP16x16 { mag: 39839, sign: false }); + data.append(FP16x16 { mag: 741, sign: false }); + data.append(FP16x16 { mag: 63886, sign: false }); + data.append(FP16x16 { mag: 27798, sign: false }); + data.append(FP16x16 { mag: 122499, sign: false }); + data.append(FP16x16 { mag: 56745, sign: false }); + data.append(FP16x16 { mag: 40400, sign: false }); + data.append(FP16x16 { mag: 34434, sign: false }); + data.append(FP16x16 { mag: 33694, sign: false }); + data.append(FP16x16 { mag: 33967, sign: false }); + data.append(FP16x16 { mag: 67256, sign: false }); + data.append(FP16x16 { mag: 59193, sign: false }); + data.append(FP16x16 { mag: 51674, sign: false }); + data.append(FP16x16 { mag: 158517, sign: false }); + data.append(FP16x16 { mag: 16432, sign: false }); + data.append(FP16x16 { mag: 21737, sign: false }); + data.append(FP16x16 { mag: 28953, sign: false }); + data.append(FP16x16 { mag: 44046, sign: false }); + data.append(FP16x16 { mag: 53588, sign: false }); + data.append(FP16x16 { mag: 83878, sign: false }); + data.append(FP16x16 { mag: 89156, sign: false }); + data.append(FP16x16 { mag: 60105, sign: false }); + data.append(FP16x16 { mag: 97829, sign: false }); + data.append(FP16x16 { mag: 11903, sign: false }); + data.append(FP16x16 { mag: 10343, sign: false }); + data.append(FP16x16 { mag: 35204, sign: false }); + data.append(FP16x16 { mag: 25359, sign: false }); + data.append(FP16x16 { mag: 121776, sign: false }); + data.append(FP16x16 { mag: 48421, sign: false }); + data.append(FP16x16 { mag: 41238, sign: false }); + data.append(FP16x16 { mag: 15248, sign: false }); + data.append(FP16x16 { mag: 15911, sign: false }); + data.append(FP16x16 { mag: 52419, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_2/output_0.cairo b/tests/nodes/softmax_axis_2/output_0.cairo new file mode 100644 index 000000000..02082e0e3 --- /dev/null +++ b/tests/nodes/softmax_axis_2/output_0.cairo 
@@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 14646, sign: false }); + data.append(FP16x16 { mag: 13563, sign: false }); + data.append(FP16x16 { mag: 14330, sign: false }); + data.append(FP16x16 { mag: 11653, sign: false }); + data.append(FP16x16 { mag: 11342, sign: false }); + data.append(FP16x16 { mag: 41084, sign: false }); + data.append(FP16x16 { mag: 5257, sign: false }); + data.append(FP16x16 { mag: 10639, sign: false }); + data.append(FP16x16 { mag: 3519, sign: false }); + data.append(FP16x16 { mag: 5034, sign: false }); + data.append(FP16x16 { mag: 13621, sign: false }); + data.append(FP16x16 { mag: 15899, sign: false }); + data.append(FP16x16 { mag: 14134, sign: false }); + data.append(FP16x16 { mag: 14291, sign: false }); + data.append(FP16x16 { mag: 7588, sign: false }); + data.append(FP16x16 { mag: 6804, sign: false }); + data.append(FP16x16 { mag: 9016, sign: false }); + data.append(FP16x16 { mag: 9502, sign: false }); + data.append(FP16x16 { mag: 8963, sign: false }); + data.append(FP16x16 { mag: 31248, sign: false }); + data.append(FP16x16 { mag: 11697, sign: false }); + data.append(FP16x16 { mag: 10471, sign: false }); + data.append(FP16x16 { mag: 9115, sign: false }); + data.append(FP16x16 { mag: 21542, sign: false }); + data.append(FP16x16 { mag: 12708, sign: false }); + data.append(FP16x16 { mag: 8068, sign: false }); + data.append(FP16x16 { mag: 14079, sign: false }); + data.append(FP16x16 { mag: 14491, sign: false }); + data.append(FP16x16 { mag: 7980, sign: false }); + data.append(FP16x16 { mag: 20915, sign: false }); + data.append(FP16x16 { mag: 7189, sign: false }); + 
data.append(FP16x16 { mag: 30496, sign: false }); + data.append(FP16x16 { mag: 11181, sign: false }); + data.append(FP16x16 { mag: 8713, sign: false }); + data.append(FP16x16 { mag: 7955, sign: false }); + data.append(FP16x16 { mag: 10138, sign: false }); + data.append(FP16x16 { mag: 10180, sign: false }); + data.append(FP16x16 { mag: 16918, sign: false }); + data.append(FP16x16 { mag: 14960, sign: false }); + data.append(FP16x16 { mag: 13338, sign: false }); + data.append(FP16x16 { mag: 42246, sign: false }); + data.append(FP16x16 { mag: 4833, sign: false }); + data.append(FP16x16 { mag: 5240, sign: false }); + data.append(FP16x16 { mag: 5850, sign: false }); + data.append(FP16x16 { mag: 7365, sign: false }); + data.append(FP16x16 { mag: 8883, sign: false }); + data.append(FP16x16 { mag: 14103, sign: false }); + data.append(FP16x16 { mag: 15286, sign: false }); + data.append(FP16x16 { mag: 9812, sign: false }); + data.append(FP16x16 { mag: 17449, sign: false }); + data.append(FP16x16 { mag: 6567, sign: false }); + data.append(FP16x16 { mag: 6413, sign: false }); + data.append(FP16x16 { mag: 9372, sign: false }); + data.append(FP16x16 { mag: 8064, sign: false }); + data.append(FP16x16 { mag: 35118, sign: false }); + data.append(FP16x16 { mag: 15713, sign: false }); + data.append(FP16x16 { mag: 14081, sign: false }); + data.append(FP16x16 { mag: 9471, sign: false }); + data.append(FP16x16 { mag: 9567, sign: false }); + data.append(FP16x16 { mag: 16701, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_minus_1.cairo b/tests/nodes/softmax_axis_minus_1.cairo new file mode 100644 index 000000000..4dbe58eb1 --- /dev/null +++ b/tests/nodes/softmax_axis_minus_1.cairo @@ -0,0 +1,20 @@ +mod input_0; +mod output_0; + + +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::nn::FP16x16NN; +use orion::operators::nn::NNTrait; +use orion::numbers::FixedTrait; +use orion::operators::tensor::FP16x16TensorPartialEq; 
+ +#[test] +#[available_gas(2000000000)] +fn test_softmax_axis_minus_1() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::softmax(@input_0, Option::None); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/softmax_axis_minus_1/input_0.cairo b/tests/nodes/softmax_axis_minus_1/input_0.cairo new file mode 100644 index 000000000..5714cbcc6 --- /dev/null +++ b/tests/nodes/softmax_axis_minus_1/input_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 54590, sign: false }); + data.append(FP16x16 { mag: 61178, sign: false }); + data.append(FP16x16 { mag: 18943, sign: false }); + data.append(FP16x16 { mag: 9534, sign: false }); + data.append(FP16x16 { mag: 42629, sign: false }); + data.append(FP16x16 { mag: 42173, sign: false }); + data.append(FP16x16 { mag: 42740, sign: false }); + data.append(FP16x16 { mag: 113571, sign: false }); + data.append(FP16x16 { mag: 56924, sign: false }); + data.append(FP16x16 { mag: 41383, sign: false }); + data.append(FP16x16 { mag: 44527, sign: false }); + data.append(FP16x16 { mag: 4303, sign: false }); + data.append(FP16x16 { mag: 85774, sign: false }); + data.append(FP16x16 { mag: 55924, sign: false }); + data.append(FP16x16 { mag: 158412, sign: false }); + data.append(FP16x16 { mag: 23021, sign: false }); + data.append(FP16x16 { mag: 128482, sign: false }); + data.append(FP16x16 { mag: 55066, sign: false }); + data.append(FP16x16 { mag: 54463, sign: false }); + data.append(FP16x16 { mag: 95350, sign: false }); + data.append(FP16x16 { mag: 34220, sign: false }); + data.append(FP16x16 { mag: 50177, sign: false }); + 
data.append(FP16x16 { mag: 36693, sign: false }); + data.append(FP16x16 { mag: 32645, sign: false }); + data.append(FP16x16 { mag: 48677, sign: false }); + data.append(FP16x16 { mag: 18999, sign: false }); + data.append(FP16x16 { mag: 5924, sign: false }); + data.append(FP16x16 { mag: 13009, sign: false }); + data.append(FP16x16 { mag: 15491, sign: false }); + data.append(FP16x16 { mag: 32026, sign: false }); + data.append(FP16x16 { mag: 21516, sign: false }); + data.append(FP16x16 { mag: 58238, sign: false }); + data.append(FP16x16 { mag: 81336, sign: false }); + data.append(FP16x16 { mag: 23454, sign: false }); + data.append(FP16x16 { mag: 34911, sign: false }); + data.append(FP16x16 { mag: 50619, sign: false }); + data.append(FP16x16 { mag: 45742, sign: false }); + data.append(FP16x16 { mag: 48601, sign: false }); + data.append(FP16x16 { mag: 34036, sign: false }); + data.append(FP16x16 { mag: 57483, sign: false }); + data.append(FP16x16 { mag: 37301, sign: false }); + data.append(FP16x16 { mag: 100657, sign: false }); + data.append(FP16x16 { mag: 20333, sign: false }); + data.append(FP16x16 { mag: 32171, sign: false }); + data.append(FP16x16 { mag: 57271, sign: false }); + data.append(FP16x16 { mag: 75649, sign: false }); + data.append(FP16x16 { mag: 25921, sign: false }); + data.append(FP16x16 { mag: 989, sign: false }); + data.append(FP16x16 { mag: 35410, sign: false }); + data.append(FP16x16 { mag: 43432, sign: false }); + data.append(FP16x16 { mag: 34144, sign: false }); + data.append(FP16x16 { mag: 11011, sign: false }); + data.append(FP16x16 { mag: 101436, sign: false }); + data.append(FP16x16 { mag: 73487, sign: false }); + data.append(FP16x16 { mag: 9987, sign: false }); + data.append(FP16x16 { mag: 87623, sign: false }); + data.append(FP16x16 { mag: 35873, sign: false }); + data.append(FP16x16 { mag: 215046, sign: false }); + data.append(FP16x16 { mag: 8835, sign: false }); + data.append(FP16x16 { mag: 40411, sign: false }); + 
TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_axis_minus_1/output_0.cairo b/tests/nodes/softmax_axis_minus_1/output_0.cairo new file mode 100644 index 000000000..3ff8c002a --- /dev/null +++ b/tests/nodes/softmax_axis_minus_1/output_0.cairo @@ -0,0 +1,74 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(4); + shape.append(5); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 16293, sign: false }); + data.append(FP16x16 { mag: 18016, sign: false }); + data.append(FP16x16 { mag: 9457, sign: false }); + data.append(FP16x16 { mag: 8192, sign: false }); + data.append(FP16x16 { mag: 13575, sign: false }); + data.append(FP16x16 { mag: 9074, sign: false }); + data.append(FP16x16 { mag: 9153, sign: false }); + data.append(FP16x16 { mag: 26976, sign: false }); + data.append(FP16x16 { mag: 11365, sign: false }); + data.append(FP16x16 { mag: 8965, sign: false }); + data.append(FP16x16 { mag: 6367, sign: false }); + data.append(FP16x16 { mag: 3446, sign: false }); + data.append(FP16x16 { mag: 11948, sign: false }); + data.append(FP16x16 { mag: 7577, sign: false }); + data.append(FP16x16 { mag: 36196, sign: false }); + data.append(FP16x16 { mag: 5345, sign: false }); + data.append(FP16x16 { mag: 26720, sign: false }); + data.append(FP16x16 { mag: 8716, sign: false }); + data.append(FP16x16 { mag: 8636, sign: false }); + data.append(FP16x16 { mag: 16117, sign: false }); + data.append(FP16x16 { mag: 11835, sign: false }); + data.append(FP16x16 { mag: 15098, sign: false }); + data.append(FP16x16 { mag: 12290, sign: false }); + data.append(FP16x16 { mag: 11554, sign: false }); + data.append(FP16x16 { mag: 14756, sign: false }); + data.append(FP16x16 { mag: 13375, sign: false 
}); + data.append(FP16x16 { mag: 10956, sign: false }); + data.append(FP16x16 { mag: 12207, sign: false }); + data.append(FP16x16 { mag: 12678, sign: false }); + data.append(FP16x16 { mag: 16317, sign: false }); + data.append(FP16x16 { mag: 8738, sign: false }); + data.append(FP16x16 { mag: 15304, sign: false }); + data.append(FP16x16 { mag: 21770, sign: false }); + data.append(FP16x16 { mag: 9001, sign: false }); + data.append(FP16x16 { mag: 10720, sign: false }); + data.append(FP16x16 { mag: 13696, sign: false }); + data.append(FP16x16 { mag: 12714, sign: false }); + data.append(FP16x16 { mag: 13281, sign: false }); + data.append(FP16x16 { mag: 10634, sign: false }); + data.append(FP16x16 { mag: 15208, sign: false }); + data.append(FP16x16 { mag: 9807, sign: false }); + data.append(FP16x16 { mag: 25787, sign: false }); + data.append(FP16x16 { mag: 7570, sign: false }); + data.append(FP16x16 { mag: 9069, sign: false }); + data.append(FP16x16 { mag: 13301, sign: false }); + data.append(FP16x16 { mag: 22282, sign: false }); + data.append(FP16x16 { mag: 10433, sign: false }); + data.append(FP16x16 { mag: 7131, sign: false }); + data.append(FP16x16 { mag: 12058, sign: false }); + data.append(FP16x16 { mag: 13629, sign: false }); + data.append(FP16x16 { mag: 9350, sign: false }); + data.append(FP16x16 { mag: 6569, sign: false }); + data.append(FP16x16 { mag: 26106, sign: false }); + data.append(FP16x16 { mag: 17042, sign: false }); + data.append(FP16x16 { mag: 6467, sign: false }); + data.append(FP16x16 { mag: 7100, sign: false }); + data.append(FP16x16 { mag: 3223, sign: false }); + data.append(FP16x16 { mag: 49623, sign: false }); + data.append(FP16x16 { mag: 2133, sign: false }); + data.append(FP16x16 { mag: 3454, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/softmax_fp16x16/input_0.cairo b/tests/nodes/softmax_fp16x16/input_0.cairo deleted file mode 100644 index eb5a6bc69..000000000 --- 
a/tests/nodes/softmax_fp16x16/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 93078, sign: true }); - data.append(FP16x16 { mag: 53025, sign: false }); - data.append(FP16x16 { mag: 183455, sign: true }); - data.append(FP16x16 { mag: 61044, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/softmax_fp16x16/output_0.cairo b/tests/nodes/softmax_fp16x16/output_0.cairo deleted file mode 100644 index 22ee5fe31..000000000 --- a/tests/nodes/softmax_fp16x16/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorSub}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 6366, sign: false }); - data.append(FP16x16 { mag: 59169, sign: false }); - data.append(FP16x16 { mag: 8768, sign: false }); - data.append(FP16x16 { mag: 56767, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/softmax_fp8x23.cairo b/tests/nodes/softmax_fp8x23.cairo deleted file mode 100644 index 616810b5a..000000000 --- a/tests/nodes/softmax_fp8x23.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::nn::NNTrait; -use orion::numbers::FixedTrait; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::nn::FP8x23NN; - -#[test] 
-#[available_gas(2000000000)] -fn test_softmax_fp8x23() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = NNTrait::softmax(@input_0, 0); - - assert_eq(y, z); -} diff --git a/tests/nodes/softmax_fp8x23/input_0.cairo b/tests/nodes/softmax_fp8x23/input_0.cairo deleted file mode 100644 index 673dc8546..000000000 --- a/tests/nodes/softmax_fp8x23/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 12134368, sign: false }); - data.append(FP8x23 { mag: 22252681, sign: true }); - data.append(FP8x23 { mag: 1818273, sign: false }); - data.append(FP8x23 { mag: 18287369, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/softmax_fp8x23/output_0.cairo b/tests/nodes/softmax_fp8x23/output_0.cairo deleted file mode 100644 index a8f691a14..000000000 --- a/tests/nodes/softmax_fp8x23/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorSub}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 6490929, sign: false }); - data.append(FP8x23 { mag: 3221031, sign: false }); - data.append(FP8x23 { mag: 1897678, sign: false }); - data.append(FP8x23 { mag: 5167576, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} From 61e8542dadc6519cd3afd035ea7551939a26b6f2 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:27:48 +0100 
Subject: [PATCH 51/68] update doc --- docs/framework/operators/neural-network/nn.softmax.md | 6 +++--- src/operators/nn/core.cairo | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/framework/operators/neural-network/nn.softmax.md b/docs/framework/operators/neural-network/nn.softmax.md index 4fb83e0b3..c1a15f026 100644 --- a/docs/framework/operators/neural-network/nn.softmax.md +++ b/docs/framework/operators/neural-network/nn.softmax.md @@ -1,7 +1,7 @@ # NNTrait::softmax ```rust - fn softmax(tensor: @Tensor, axis: usize) -> Tensor; + fn softmax(tensor: @Tensor, axis: Option) -> Tensor; ``` Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range \[0,1] and sum to 1. @@ -13,7 +13,7 @@ $$ ## Args * `tensor`(`@Tensor`) - The input tensor. -* `axis`(`usize`) - The axis along which to compute the softmax. +* `axis`(`usize`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). ## Returns @@ -44,7 +44,7 @@ fn softmax_example() -> Tensor { .span(), ); - return NNTrait::softmax(@tensor, 1); + return NNTrait::softmax(@tensor, Option::Some(1)); } >>> [[2255697,6132911],[2255697,6132911]] // The fixed point representation of diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index cc2f5c77c..af1b04a15 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -62,7 +62,7 @@ trait NNTrait { /// # NNTrait::softmax /// /// ```rust - /// fn softmax(tensor: @Tensor, axis: usize) -> Tensor; + /// fn softmax(tensor: @Tensor, axis: Option) -> Tensor; /// ``` /// /// Applies the Softmax function to an n-dimensional input Tensor rescaling them so that the elements of the n-dimensional output Tensor lie in the range \[0,1] and sum to 1. 
@@ -74,7 +74,7 @@ trait NNTrait { /// ## Args /// /// * `tensor`(`@Tensor`) - The input tensor. - /// * `axis`(`usize`) - The axis along which to compute the softmax. + /// * `axis`(`usize`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). /// /// ## Returns /// @@ -105,7 +105,7 @@ trait NNTrait { /// .span(), /// ); /// - /// return NNTrait::softmax(@tensor, 1); + /// return NNTrait::softmax(@tensor, Option::Some(1)); /// } /// >>> [[2255697,6132911],[2255697,6132911]] /// // The fixed point representation of From d6584a7e03874a37bcac7f0fd3dc3c5baf068432 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:36:25 +0100 Subject: [PATCH 52/68] Update doc --- docs/framework/operators/neural-network/nn.softmax.md | 2 +- src/operators/nn/core.cairo | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/framework/operators/neural-network/nn.softmax.md b/docs/framework/operators/neural-network/nn.softmax.md index c1a15f026..23735312b 100644 --- a/docs/framework/operators/neural-network/nn.softmax.md +++ b/docs/framework/operators/neural-network/nn.softmax.md @@ -13,7 +13,7 @@ $$ ## Args * `tensor`(`@Tensor`) - The input tensor. -* `axis`(`usize`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). +* `axis`(`Option`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). ## Returns diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index af1b04a15..ad66197fd 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -74,7 +74,7 @@ trait NNTrait { /// ## Args /// /// * `tensor`(`@Tensor`) - The input tensor. 
- /// * `axis`(`usize`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). + /// * `axis`(`Option`) - Describes the dimension Softmax will be performed on. Negative value means counting dimensions from the back. Accepted range is [-r, r-1] where r = rank(input). /// /// ## Returns /// From 2d5d947fbf11bf8baea83593a1490b18fdc409fb Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 08:51:00 +0100 Subject: [PATCH 53/68] refactor operator --- .../ml/linear/linear_classifier.cairo | 76 +++++++++++-------- src/operators/tensor/core.cairo | 4 +- .../tensor/implementations/tensor_bool.cairo | 4 +- .../implementations/tensor_complex64.cairo | 4 +- .../implementations/tensor_fp16x16.cairo | 4 +- .../implementations/tensor_fp16x16wide.cairo | 4 +- .../implementations/tensor_fp32x32.cairo | 4 +- .../implementations/tensor_fp64x64.cairo | 4 +- .../implementations/tensor_fp8x23.cairo | 4 +- .../implementations/tensor_fp8x23wide.cairo | 4 +- .../tensor/implementations/tensor_i32.cairo | 4 +- .../tensor/implementations/tensor_i8.cairo | 4 +- .../tensor/implementations/tensor_u32.cairo | 4 +- src/operators/tensor/math/argmax.cairo | 37 +++++---- 14 files changed, 89 insertions(+), 72 deletions(-) diff --git a/src/operators/ml/linear/linear_classifier.cairo b/src/operators/ml/linear/linear_classifier.cairo index 18cbd6988..003cf742b 100644 --- a/src/operators/ml/linear/linear_classifier.cairo +++ b/src/operators/ml/linear/linear_classifier.cairo @@ -197,7 +197,9 @@ impl LinearClassifierImpl< let mut labels = scores.argmax(1, Option::None, Option::None); loop { match labels.data.pop_front() { - Option::Some(i) => { labels_list.append(*classlabels[*i]); }, + Option::Some(i) => { + labels_list.append(*classlabels[(*i).try_into().unwrap()]); + }, Option::None => { break; } }; }; @@ -205,48 +207,56 @@ impl LinearClassifierImpl< let mut i = 0; match 
classifier.post_transform { POST_TRANSFORM::NONE => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::zero() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::zero() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::SOFTMAX => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::half() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::half() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::LOGISTIC => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::half() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::half() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::SOFTMAXZERO => { - while i != scores.data.len() { - if *scores.data.at(i) >= NumberTrait::half() { - labels_list.append(*classlabels[0]); - } else { - labels_list.append(0); - } + while i != scores + .data + .len() { + if *scores.data.at(i) >= NumberTrait::half() { + labels_list.append(*classlabels[0]); + } else { + labels_list.append(0); + } - i += 1; - }; + i += 1; + }; }, POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), }; diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 70f551ec8..25ddc8dd3 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -765,8 +765,8 @@ trait TensorTrait { /// ``` /// fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option 
- ) -> Tensor; + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor; /// # tensor.argmin /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index a6c54261c..d08a0a2c1 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -78,8 +78,8 @@ impl BoolTensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index fa465b33e..2bbbab8fe 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -90,10 +90,10 @@ impl Complex64Tensor of TensorTrait { fn argmax( self: @Tensor, - axis: usize, + axis: i32, keepdims: Option, select_last_index: Option - ) -> Tensor { + ) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index e1795000b..e1a780e17 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -89,8 +89,8 @@ impl FP16x16Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index c2e9b9344..13ea883cf 100644 --- 
a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -94,10 +94,10 @@ impl FP16x16WTensor of TensorTrait { fn argmax( self: @Tensor, - axis: usize, + axis: i32, keepdims: Option, select_last_index: Option - ) -> Tensor { + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 5cbf64139..cb745f113 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -86,8 +86,8 @@ impl FP32x32Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 3c18ae38a..0ee0f41d2 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -86,8 +86,8 @@ impl FP64x64Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index 33a19c080..c404e4641 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -86,8 +86,8 @@ impl FP8x23Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, 
select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 459657891..f415266c4 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -89,8 +89,8 @@ impl FP8x23WTensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index d4fa1b09c..3c3641310 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -87,8 +87,8 @@ impl I32Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 903d66638..a872a4665 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -84,8 +84,8 @@ impl I8Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git 
a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 01456aec6..e85316a96 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -83,8 +83,8 @@ impl U32Tensor of TensorTrait { } fn argmax( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option - ) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option + ) -> Tensor { math::argmax::argmax(self, axis, keepdims, select_last_index) } diff --git a/src/operators/tensor/math/argmax.cairo b/src/operators/tensor/math/argmax.cairo index f16c99b5c..861e7b893 100644 --- a/src/operators/tensor/math/argmax.cairo +++ b/src/operators/tensor/math/argmax.cairo @@ -1,4 +1,6 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, ravel_index, unravel_index}; +use core::option::OptionTrait; +use core::traits::TryInto; +use orion::operators::tensor::{core::{Tensor, TensorTrait, ravel_index, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{reduce_output_shape, len_from_shape, combine_indices}; use orion::numbers::NumberTrait; @@ -13,8 +15,8 @@ fn argmax< impl TCopy: Copy, impl TDrop: Drop, >( - self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option -) -> Tensor { + self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option +) -> Tensor { let keepdims = match keepdims { Option::Some(val) => val, Option::None => true, @@ -25,15 +27,21 @@ fn argmax< Option::None => false, }; - assert(axis <= (*self.shape).len(), 'axis out of dimensions'); + // Adjust the axis if it's negative + let axis: usize = if axis < 0 { + ((*self.shape).len().try_into().unwrap() + axis).try_into().unwrap() + } else { + axis.try_into().unwrap() + }; + assert(axis < (*self.shape).len(), 'axis out of dimensions'); if (*self.shape).len() == 1 { - return find_argmax_1D::(*self, axis, true, select_last_index); + return 
find_argmax_1D::(*self, axis, keepdims, select_last_index); } - let mut output_data: Array = array![]; + let mut output_data: Array = array![]; - let output_shape = reduce_output_shape(*self.shape, axis, false); + let output_shape = reduce_output_shape(*self.shape, axis, keepdims); let output_data_len = len_from_shape(output_shape); let MIN = NumberTrait::min_value(); @@ -48,7 +56,7 @@ fn argmax< index += 1; }; - TensorTrait::::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()) + TensorTrait::::new(reduce_output_shape(*self.shape, axis, keepdims), output_data.span()) } /// Helper function that finds the index of the maximum value in a flat tensor. @@ -66,21 +74,20 @@ fn argmax< /// * A usize value representing the index of the maximum value along the specified axis. fn find_argmax_1D< T, - impl UsizeTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TPartialEq: PartialEq, impl TCopy: Copy, impl TDrop: Drop, >( mut input: Tensor, axis: usize, keepdims: bool, select_last_index: bool -) -> Tensor { - let mut output_data = ArrayTrait::::new(); +) -> Tensor { + let mut output_data = ArrayTrait::::new(); let mut max = match input.data.pop_front() { Option::Some(item) => *item, Option::None => { return TensorTrait::< - usize + i32 >::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span()); } }; @@ -108,7 +115,7 @@ fn find_argmax_1D< output_data.append(max_index); return TensorTrait::< - usize + i32 >::new(reduce_output_shape(input.shape, axis, keepdims), output_data.span()); } @@ -143,9 +150,9 @@ fn find_argmax< max_value: T, argmax: usize, select_last_index: bool -) -> usize { +) -> i32 { if axis_index == *(*input.shape)[axis] { - return argmax; + return argmax.try_into().unwrap(); } let input_indices = combine_indices(output_indices, axis_index, axis); From 0a2f576d99239f414c20598e05ef4da8f241e94d Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 09:31:15 +0100 Subject: [PATCH 54/68] test operator --- 
nodegen/node/argmax.py | 717 +++--------------- src/operators/tensor/math/argmax.cairo | 23 +- tests/nodes.cairo | 53 +- ...iro => argmax_default_axes_keepdims.cairo} | 16 +- .../input_0.cairo | 8 +- .../output_0.cairo} | 11 +- ...ault_axes_keepdims_select_last_index.cairo | 22 + .../input_0.cairo | 8 +- .../output_0.cairo} | 11 +- .../argmax_fp16x16_1D_default/input_0.cairo | 15 - .../argmax_fp16x16_1D_default/output_0.cairo | 12 - .../output_0.cairo | 12 - .../input_0.cairo | 15 - .../output_0.cairo | 12 - .../argmax_fp16x16_2D_keepdims_false.cairo | 22 - .../nodes/argmax_fp16x16_2D_last_index.cairo | 22 - .../output_0.cairo | 14 - .../argmax_fp16x16_3D_default/input_0.cairo | 22 - .../argmax_fp16x16_3D_keepdims_false.cairo | 22 - .../input_0.cairo | 22 - .../output_0.cairo | 16 - .../nodes/argmax_fp16x16_3D_last_index.cairo | 22 - .../input_0.cairo | 22 - .../output_0.cairo | 17 - tests/nodes/argmax_fp8x23_1D_default.cairo | 22 - .../argmax_fp8x23_1D_default/input_0.cairo | 15 - .../argmax_fp8x23_1D_default/output_0.cairo | 12 - .../argmax_fp8x23_1D_keepdims_false.cairo | 22 - .../input_0.cairo | 15 - .../output_0.cairo | 12 - tests/nodes/argmax_fp8x23_1D_last_index.cairo | 22 - .../argmax_fp8x23_1D_last_index/input_0.cairo | 15 - .../output_0.cairo | 12 - tests/nodes/argmax_fp8x23_2D_default.cairo | 22 - .../argmax_fp8x23_2D_default/input_0.cairo | 17 - .../argmax_fp8x23_2D_keepdims_false.cairo | 22 - .../input_0.cairo | 17 - .../output_0.cairo | 13 - tests/nodes/argmax_fp8x23_2D_last_index.cairo | 22 - .../argmax_fp8x23_2D_last_index/input_0.cairo | 17 - .../output_0.cairo | 14 - tests/nodes/argmax_fp8x23_3D_default.cairo | 22 - .../argmax_fp8x23_3D_default/input_0.cairo | 22 - .../argmax_fp8x23_3D_default/output_0.cairo | 17 - .../argmax_fp8x23_3D_keepdims_false.cairo | 22 - .../input_0.cairo | 22 - .../output_0.cairo | 16 - tests/nodes/argmax_fp8x23_3D_last_index.cairo | 22 - .../argmax_fp8x23_3D_last_index/input_0.cairo | 22 - .../output_0.cairo | 17 - 
tests/nodes/argmax_i32_1D_default.cairo | 22 - .../argmax_i32_1D_default/output_0.cairo | 12 - .../nodes/argmax_i32_1D_keepdims_false.cairo | 22 - .../input_0.cairo | 14 - .../output_0.cairo | 12 - tests/nodes/argmax_i32_1D_last_index.cairo | 22 - .../argmax_i32_1D_last_index/input_0.cairo | 14 - .../argmax_i32_1D_last_index/output_0.cairo | 12 - tests/nodes/argmax_i32_2D_default.cairo | 22 - .../argmax_i32_2D_default/output_0.cairo | 14 - .../nodes/argmax_i32_2D_keepdims_false.cairo | 22 - .../output_0.cairo | 13 - tests/nodes/argmax_i32_2D_last_index.cairo | 22 - .../argmax_i32_2D_last_index/output_0.cairo | 14 - tests/nodes/argmax_i32_3D_default.cairo | 22 - .../nodes/argmax_i32_3D_default/input_0.cairo | 21 - .../argmax_i32_3D_default/output_0.cairo | 17 - .../nodes/argmax_i32_3D_keepdims_false.cairo | 22 - .../input_0.cairo | 21 - .../output_0.cairo | 16 - tests/nodes/argmax_i32_3D_last_index.cairo | 22 - .../argmax_i32_3D_last_index/input_0.cairo | 21 - .../argmax_i32_3D_last_index/output_0.cairo | 17 - tests/nodes/argmax_i8_1D_default.cairo | 22 - .../nodes/argmax_i8_1D_default/input_0.cairo | 14 - .../nodes/argmax_i8_1D_default/output_0.cairo | 12 - tests/nodes/argmax_i8_1D_keepdims_false.cairo | 22 - .../argmax_i8_1D_keepdims_false/input_0.cairo | 14 - .../output_0.cairo | 12 - tests/nodes/argmax_i8_1D_last_index.cairo | 22 - .../argmax_i8_1D_last_index/input_0.cairo | 14 - .../argmax_i8_1D_last_index/output_0.cairo | 12 - tests/nodes/argmax_i8_2D_default.cairo | 22 - .../nodes/argmax_i8_2D_default/input_0.cairo | 16 - .../nodes/argmax_i8_2D_default/output_0.cairo | 14 - tests/nodes/argmax_i8_2D_keepdims_false.cairo | 22 - .../argmax_i8_2D_keepdims_false/input_0.cairo | 16 - .../output_0.cairo | 13 - tests/nodes/argmax_i8_2D_last_index.cairo | 22 - .../argmax_i8_2D_last_index/input_0.cairo | 16 - .../argmax_i8_2D_last_index/output_0.cairo | 14 - tests/nodes/argmax_i8_3D_default.cairo | 22 - .../nodes/argmax_i8_3D_default/input_0.cairo | 21 - 
.../nodes/argmax_i8_3D_default/output_0.cairo | 17 - tests/nodes/argmax_i8_3D_keepdims_false.cairo | 22 - .../argmax_i8_3D_keepdims_false/input_0.cairo | 21 - .../output_0.cairo | 16 - tests/nodes/argmax_i8_3D_last_index.cairo | 22 - .../argmax_i8_3D_last_index/input_0.cairo | 21 - .../argmax_i8_3D_last_index/output_0.cairo | 17 - ...1D_default.cairo => argmax_keepdims.cairo} | 16 +- .../input_0.cairo | 8 +- .../output_0.cairo | 7 +- ...> argmax_keepdims_select_last_index.cairo} | 16 +- .../input_0.cairo | 10 +- .../output_0.cairo} | 11 +- ...ro => argmax_negative_axis_keepdims.cairo} | 16 +- .../input_0.cairo | 17 + .../output_0.cairo | 7 +- ...tive_axis_keepdims_select_last_index.cairo | 22 + .../input_0.cairo | 17 + .../output_0.cairo | 10 +- ...default.cairo => argmax_no_keepdims.cairo} | 16 +- tests/nodes/argmax_no_keepdims/input_0.cairo | 17 + .../output_0.cairo | 5 +- ...argmax_no_keepdims_select_last_index.cairo | 22 + .../input_0.cairo | 17 + .../output_0.cairo} | 10 +- tests/nodes/argmax_u32_1D_default.cairo | 20 - .../nodes/argmax_u32_1D_default/input_0.cairo | 14 - .../argmax_u32_1D_default/output_0.cairo | 12 - .../nodes/argmax_u32_1D_keepdims_false.cairo | 20 - .../input_0.cairo | 14 - .../output_0.cairo | 12 - tests/nodes/argmax_u32_1D_last_index.cairo | 20 - .../argmax_u32_1D_last_index/input_0.cairo | 14 - .../argmax_u32_1D_last_index/output_0.cairo | 12 - tests/nodes/argmax_u32_2D_default.cairo | 20 - .../nodes/argmax_u32_2D_default/input_0.cairo | 16 - .../argmax_u32_2D_default/output_0.cairo | 14 - .../nodes/argmax_u32_2D_keepdims_false.cairo | 20 - .../input_0.cairo | 16 - .../output_0.cairo | 13 - tests/nodes/argmax_u32_2D_last_index.cairo | 20 - .../argmax_u32_2D_last_index/input_0.cairo | 16 - .../argmax_u32_2D_last_index/output_0.cairo | 14 - tests/nodes/argmax_u32_3D_default.cairo | 20 - .../nodes/argmax_u32_3D_default/input_0.cairo | 21 - .../argmax_u32_3D_default/output_0.cairo | 17 - .../nodes/argmax_u32_3D_keepdims_false.cairo | 20 - 
.../input_0.cairo | 21 - .../output_0.cairo | 16 - tests/nodes/argmax_u32_3D_last_index.cairo | 20 - .../argmax_u32_3D_last_index/input_0.cairo | 21 - .../argmax_u32_3D_last_index/output_0.cairo | 17 - 145 files changed, 332 insertions(+), 2875 deletions(-) rename tests/nodes/{argmax_fp16x16_1D_last_index.cairo => argmax_default_axes_keepdims.cairo} (56%) rename tests/nodes/{argmax_fp16x16_2D_keepdims_false => argmax_default_axes_keepdims}/input_0.cairo (65%) rename tests/nodes/{argmax_i32_2D_keepdims_false/input_0.cairo => argmax_default_axes_keepdims/output_0.cairo} (69%) create mode 100644 tests/nodes/argmax_default_axes_keepdims_select_last_index.cairo rename tests/nodes/{argmax_fp16x16_2D_last_index => argmax_default_axes_keepdims_select_last_index}/input_0.cairo (65%) rename tests/nodes/{argmax_i32_2D_last_index/input_0.cairo => argmax_default_axes_keepdims_select_last_index/output_0.cairo} (69%) delete mode 100644 tests/nodes/argmax_fp16x16_1D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_1D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_1D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_1D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_1D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_2D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_fp16x16_2D_last_index.cairo delete mode 100644 tests/nodes/argmax_fp16x16_2D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_3D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_3D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_fp16x16_3D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_3D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_fp16x16_3D_last_index.cairo delete mode 100644 tests/nodes/argmax_fp16x16_3D_last_index/input_0.cairo delete mode 100644 
tests/nodes/argmax_fp16x16_3D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_default.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_last_index.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_1D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_default.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_last_index.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_2D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_default.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_last_index.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_fp8x23_3D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_1D_default.cairo delete mode 100644 
tests/nodes/argmax_i32_1D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_1D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_i32_1D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_i32_1D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_1D_last_index.cairo delete mode 100644 tests/nodes/argmax_i32_1D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_i32_1D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_2D_default.cairo delete mode 100644 tests/nodes/argmax_i32_2D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_2D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_i32_2D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_2D_last_index.cairo delete mode 100644 tests/nodes/argmax_i32_2D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_3D_default.cairo delete mode 100644 tests/nodes/argmax_i32_3D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_i32_3D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_3D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_i32_3D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_i32_3D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_i32_3D_last_index.cairo delete mode 100644 tests/nodes/argmax_i32_3D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_i32_3D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_1D_default.cairo delete mode 100644 tests/nodes/argmax_i8_1D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_1D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_1D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_i8_1D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_1D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_1D_last_index.cairo delete mode 100644 
tests/nodes/argmax_i8_1D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_1D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_2D_default.cairo delete mode 100644 tests/nodes/argmax_i8_2D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_2D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_2D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_i8_2D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_2D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_2D_last_index.cairo delete mode 100644 tests/nodes/argmax_i8_2D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_2D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_3D_default.cairo delete mode 100644 tests/nodes/argmax_i8_3D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_3D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_3D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_i8_3D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_3D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_i8_3D_last_index.cairo delete mode 100644 tests/nodes/argmax_i8_3D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_i8_3D_last_index/output_0.cairo rename tests/nodes/{argmax_fp16x16_1D_default.cairo => argmax_keepdims.cairo} (58%) rename tests/nodes/{argmax_fp16x16_2D_default => argmax_keepdims}/input_0.cairo (65%) rename tests/nodes/{argmax_fp8x23_2D_default => argmax_keepdims}/output_0.cairo (72%) rename tests/nodes/{argmax_fp16x16_1D_keepdims_false.cairo => argmax_keepdims_select_last_index.cairo} (56%) rename tests/nodes/{argmax_fp16x16_1D_keepdims_false => argmax_keepdims_select_last_index}/input_0.cairo (60%) rename tests/nodes/{argmax_i32_2D_default/input_0.cairo => argmax_keepdims_select_last_index/output_0.cairo} (68%) rename tests/nodes/{argmax_fp16x16_2D_default.cairo => 
argmax_negative_axis_keepdims.cairo} (56%) create mode 100644 tests/nodes/argmax_negative_axis_keepdims/input_0.cairo rename tests/nodes/{argmax_fp16x16_2D_default => argmax_negative_axis_keepdims}/output_0.cairo (72%) create mode 100644 tests/nodes/argmax_negative_axis_keepdims_select_last_index.cairo create mode 100644 tests/nodes/argmax_negative_axis_keepdims_select_last_index/input_0.cairo rename tests/nodes/{argmax_fp16x16_3D_default => argmax_negative_axis_keepdims_select_last_index}/output_0.cairo (67%) rename tests/nodes/{argmax_fp16x16_3D_default.cairo => argmax_no_keepdims.cairo} (57%) create mode 100644 tests/nodes/argmax_no_keepdims/input_0.cairo rename tests/nodes/{argmax_fp16x16_2D_keepdims_false => argmax_no_keepdims}/output_0.cairo (70%) create mode 100644 tests/nodes/argmax_no_keepdims_select_last_index.cairo create mode 100644 tests/nodes/argmax_no_keepdims_select_last_index/input_0.cairo rename tests/nodes/{argmax_i32_1D_default/input_0.cairo => argmax_no_keepdims_select_last_index/output_0.cairo} (69%) delete mode 100644 tests/nodes/argmax_u32_1D_default.cairo delete mode 100644 tests/nodes/argmax_u32_1D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_1D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_1D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_u32_1D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_1D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_1D_last_index.cairo delete mode 100644 tests/nodes/argmax_u32_1D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_1D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_2D_default.cairo delete mode 100644 tests/nodes/argmax_u32_2D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_2D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_2D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_u32_2D_keepdims_false/input_0.cairo delete mode 
100644 tests/nodes/argmax_u32_2D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_2D_last_index.cairo delete mode 100644 tests/nodes/argmax_u32_2D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_2D_last_index/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_3D_default.cairo delete mode 100644 tests/nodes/argmax_u32_3D_default/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_3D_default/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_3D_keepdims_false.cairo delete mode 100644 tests/nodes/argmax_u32_3D_keepdims_false/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_3D_keepdims_false/output_0.cairo delete mode 100644 tests/nodes/argmax_u32_3D_last_index.cairo delete mode 100644 tests/nodes/argmax_u32_3D_last_index/input_0.cairo delete mode 100644 tests/nodes/argmax_u32_3D_last_index/output_0.cairo diff --git a/nodegen/node/argmax.py b/nodegen/node/argmax.py index c874b2eea..69132adfe 100644 --- a/nodegen/node/argmax.py +++ b/nodegen/node/argmax.py @@ -3,673 +3,134 @@ from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl -def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1, dtype=np.int64) -> np.ndarray: +def argmax_use_numpy(data: np.ndarray, axis: int = 0, keepdims: int = 1) -> np.ndarray: result = np.argmax(data, axis=axis) if keepdims == 1: result = np.expand_dims(result, axis) - return result.astype(dtype) + return result.astype(np.int64) def argmax_use_numpy_select_last_index( - data: np.ndarray, axis: int = 0, keepdims: int = True, dtype=np.int64 + data: np.ndarray, axis: int = 0, keepdims: int = True ) -> np.ndarray: data = np.flip(data, axis) result = np.argmax(data, axis=axis) result = data.shape[axis] - result - 1 if keepdims: result = np.expand_dims(result, axis) - return result.astype(dtype) + return result.astype(np.int64) class Argmax(RunAll): @staticmethod - def argmax_u32(): - def argmax_1D(): - def default_params(): - x = np.random.randint(0, 255, 
(3)).astype(np.uint32) - y = argmax_use_numpy(x, dtype=np.uint32).reshape((1)) + def no_keepdims(): + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 0 + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - name = "argmax_u32_1D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(0, 255, (3)).astype(np.uint32) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_1D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(0, 255, (3)).astype(np.uint32) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_1D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_1D() - - def argmax_2D(): - def default_params(): - x = np.random.randint(0, 255, (2, 2)).astype(np.uint32) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_2D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(0, 255, (2, 2)).astype(np.uint32) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = 
"argmax_u32_2D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(0, 255, (2, 2)).astype(np.uint32) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_2D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_2D() - - def argmax_3D(): - def default_params(): - x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_3D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_3D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(0, 255, (2, 2, 2)).astype(np.uint32) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32) - - x = Tensor(Dtype.U32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_u32_3D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_3D() + name = "argmax_no_keepdims" + make_test( + [x], y, "input_0.argmax(1, Option::Some(false), Option::None(()))", name) @staticmethod - def argmax_i32(): - def argmax_1D(): - def default_params(): - x = np.random.randint(-127, 127, (3)).astype(np.int32) - y = 
argmax_use_numpy(x, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_1D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(-127, 127, (3)).astype(np.int32) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_1D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(0, 255, (3)).astype(np.int32) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32).reshape((1)) + def keepdims(): + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 1 + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - name = "argmax_i32_1D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_1D() - - def argmax_2D(): - def default_params(): - x = np.random.randint(-127, 127, (2, 2)).astype(np.int32) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_2D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(-127, 127, (2, 2)).astype(np.int32) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_2D_keepdims_false" 
- make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(-127, 127, (2, 2)).astype(np.int32) - y = argmax_use_numpy_select_last_index( - x, dtype=np.int32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_2D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_2D() - - def argmax_3D(): - def default_params(): - x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_3D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_3D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int32) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32) - - x = Tensor(Dtype.I32, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i32_3D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_3D() + name = "argmax_keepdims" + make_test( + [x], y, "input_0.argmax(1, Option::Some(true), Option::None(()))", name) @staticmethod - def argmax_i8(): - def argmax_1D(): - def default_params(): - x = np.random.randint(-127, 127, (3)).astype(np.int8) - y = argmax_use_numpy(x, 
dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_1D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(-127, 127, (3)).astype(np.int8) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_1D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(0, 255, (3)).astype(np.int8) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_1D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_1D() - - def argmax_2D(): - def default_params(): - x = np.random.randint(-127, 127, (2, 2)).astype(np.int8) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_2D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) + def default_axes_keepdims(): + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + keepdims = 1 + result = argmax_use_numpy(data, keepdims=keepdims) - def keepdims_false(): - x = np.random.randint(-127, 127, (2, 2)).astype(np.int8) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_2D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, 
Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(-127, 127, (2, 2)).astype(np.int8) - y = argmax_use_numpy_select_last_index( - x, dtype=np.int8) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_2D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_2D() - - def argmax_3D(): - def default_params(): - x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_3D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_3D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = np.random.randint(-127, 127, (2, 2, 2)).astype(np.int8) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32) - - x = Tensor(Dtype.I8, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_i8_3D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_3D() + name = "argmax_default_axes_keepdims" + make_test( + [x], y, "input_0.argmax(0, Option::Some(true), Option::None(()))", name) @staticmethod - def argmax_fp16x16(): - def argmax_1D(): - def default_params(): - x = to_fp(np.random.randint(-127, 127, (3) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy(x, dtype=np.uint32).reshape((1)) - - x 
= Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_1D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = to_fp(np.random.randint(-127, 127, (3) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_1D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = to_fp(np.random.randint(0, 255, (3)).astype( - np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_1D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_1D() - - def argmax_2D(): - def default_params(): - x = to_fp(np.random.randint(-127, 127, (2, 2) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_2D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = to_fp(np.random.randint(-127, 127, (2, 2) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_2D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = to_fp(np.random.randint(-127, 127, (2, 2) - 
).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy_select_last_index( - x, dtype=np.int8) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_2D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) + def negative_axis_keepdims(): + data = np.array([[2, 1], [3, 10]], dtype=np.float32) + axis = -1 + keepdims = 1 + result = argmax_use_numpy(data, axis=axis, keepdims=keepdims) - default_params() - keepdims_false() - last_index() - argmax_2D() + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - def argmax_3D(): - def default_params(): - x = to_fp(np.random.randint(-127, 127, (2, 2, 2) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_3D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = to_fp(np.random.randint(-127, 127, (2, 2, 2) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_3D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = to_fp(np.random.randint(-127, 127, (2, 2, 2) - ).astype(np.int8), FixedImpl.FP16x16) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32) - - x = Tensor(Dtype.FP16x16, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp16x16_3D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_3D() + name = 
"argmax_negative_axis_keepdims" + make_test( + [x], y, "input_0.argmax(-1, Option::Some(true), Option::None(()))", name) @staticmethod - def argmax_fp8x23(): - def argmax_1D(): - def default_params(): - x = to_fp(np.random.randint(-127, 127, (3) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy(x, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp8x23_1D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = to_fp(np.random.randint(-127, 127, (3) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp8x23_1D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = to_fp(np.random.randint(0, 255, (3)).astype( - np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32).reshape((1)) - - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + def no_keepdims_select_last_index(): + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 0 + result = argmax_use_numpy_select_last_index( + data, axis=axis, keepdims=keepdims) - name = "argmax_fp8x23_1D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - default_params() - keepdims_false() - last_index() - argmax_1D() + name = "argmax_no_keepdims_select_last_index" + make_test( + [x], y, "input_0.argmax(1, Option::Some(false), Option::Some(true))", name) - def argmax_2D(): - def default_params(): - x = to_fp(np.random.randint(-127, 127, (2, 2) - 
).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.FP8x23, x.shape, x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp8x23_2D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = to_fp(np.random.randint(-127, 127, (2, 2) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) - - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp8x23_2D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) - - def last_index(): - x = to_fp(np.random.randint(-127, 127, (2, 2) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy_select_last_index( - x, dtype=np.int8) - - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp8x23_2D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) - - default_params() - keepdims_false() - last_index() - argmax_2D() - - def argmax_3D(): - def default_params(): - x = to_fp(np.random.randint(-127, 127, (2, 2, 2) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy(x, dtype=np.uint32) - - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) - - name = "argmax_fp8x23_3D_default" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::None(()))", name) - - def keepdims_false(): - x = to_fp(np.random.randint(-127, 127, (2, 2, 2) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy( - x, keepdims=0, dtype=np.uint32) + @staticmethod + def keepdims_select_last_index(): + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = 1 + keepdims = 1 + result = argmax_use_numpy_select_last_index( + data, axis=axis, keepdims=keepdims) - x = Tensor(Dtype.FP8x23, 
x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - name = "argmax_fp8x23_3D_keepdims_false" - make_test( - [x], y, "input_0.argmax(0, Option::Some(false), Option::None(()))", name) + name = "argmax_keepdims_select_last_index" + make_test( + [x], y, "input_0.argmax(1, Option::Some(true), Option::Some(true))", name) - def last_index(): - x = to_fp(np.random.randint(-127, 127, (2, 2, 2) - ).astype(np.int8), FixedImpl.FP8x23) - y = argmax_use_numpy_select_last_index( - x, dtype=np.uint32) + @staticmethod + def default_axes_keepdims_select_last_index(): + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + keepdims = 1 + result = argmax_use_numpy_select_last_index(data, keepdims=keepdims) - x = Tensor(Dtype.FP8x23, x.shape, - x.flatten()) - y = Tensor(Dtype.U32, y.shape, y.flatten()) + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) - name = "argmax_fp8x23_3D_last_index" - make_test( - [x], y, "input_0.argmax(0, Option::None(()), Option::Some(true))", name) + name = "argmax_default_axes_keepdims_select_last_index" + make_test( + [x], y, "input_0.argmax(0, Option::Some(true), Option::Some(true))", name) - default_params() - keepdims_false() - last_index() - argmax_3D() + @staticmethod + def negative_axis_keepdims_select_last_index(): + data = np.array([[2, 2], [3, 10]], dtype=np.float32) + axis = -1 + keepdims = 1 + result = argmax_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims) + + x = Tensor(Dtype.FP16x16, data.shape, data.flatten()) + y = Tensor(Dtype.I32, result.shape, result.flatten()) + + name = "argmax_negative_axis_keepdims_select_last_index" + make_test( + [x], y, "input_0.argmax(-1, Option::Some(true), Option::Some(true))", name) diff --git a/src/operators/tensor/math/argmax.cairo b/src/operators/tensor/math/argmax.cairo index 861e7b893..1a48d3d8b 
100644 --- a/src/operators/tensor/math/argmax.cairo +++ b/src/operators/tensor/math/argmax.cairo @@ -8,7 +8,6 @@ use orion::numbers::NumberTrait; fn argmax< T, MAG, - impl UsizeTensor: TensorTrait, impl TNumber: NumberTrait, impl TPartialOrd: PartialOrd, impl TPartialEq: PartialEq, @@ -17,31 +16,25 @@ fn argmax< >( self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option ) -> Tensor { - let keepdims = match keepdims { - Option::Some(val) => val, - Option::None => true, - }; - - let select_last_index = match select_last_index { - Option::Some(val) => val, - Option::None => false, - }; + let keepdims = keepdims.unwrap_or(true); + let select_last_index = select_last_index.unwrap_or(false); - // Adjust the axis if it's negative - let axis: usize = if axis < 0 { + // Convert negative axis to positive + let axis = if axis < 0 { ((*self.shape).len().try_into().unwrap() + axis).try_into().unwrap() } else { axis.try_into().unwrap() }; - assert(axis < (*self.shape).len(), 'axis out of dimensions'); + + assert(axis <= (*self.shape).len(), 'axis out of dimensions'); if (*self.shape).len() == 1 { - return find_argmax_1D::(*self, axis, keepdims, select_last_index); + return find_argmax_1D::(*self, axis, true, select_last_index); } let mut output_data: Array = array![]; - let output_shape = reduce_output_shape(*self.shape, axis, keepdims); + let output_shape = reduce_output_shape(*self.shape, axis, false); let output_data_len = len_from_shape(output_shape); let MIN = NumberTrait::min_value(); diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 19b1a7df9..a8b5f3db6 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -16,51 +16,6 @@ // mod add_i8_broadcast; // mod add_u32; // mod add_u32_broadcast; -// mod argmax_fp16x16_1D_default; -// mod argmax_fp16x16_1D_keepdims_false; -// mod argmax_fp16x16_1D_last_index; -// mod argmax_fp16x16_2D_default; -// mod argmax_fp16x16_2D_keepdims_false; -// mod argmax_fp16x16_2D_last_index; -// mod argmax_fp16x16_3D_default; 
-// mod argmax_fp16x16_3D_keepdims_false; -// mod argmax_fp16x16_3D_last_index; -// mod argmax_fp8x23_1D_default; -// mod argmax_fp8x23_1D_keepdims_false; -// mod argmax_fp8x23_1D_last_index; -// mod argmax_fp8x23_2D_default; -// mod argmax_fp8x23_2D_keepdims_false; -// mod argmax_fp8x23_2D_last_index; -// mod argmax_fp8x23_3D_default; -// mod argmax_fp8x23_3D_keepdims_false; -// mod argmax_fp8x23_3D_last_index; -// mod argmax_i32_1D_default; -// mod argmax_i32_1D_keepdims_false; -// mod argmax_i32_1D_last_index; -// mod argmax_i32_2D_default; -// mod argmax_i32_2D_keepdims_false; -// mod argmax_i32_2D_last_index; -// mod argmax_i32_3D_default; -// mod argmax_i32_3D_keepdims_false; -// mod argmax_i32_3D_last_index; -// mod argmax_i8_1D_default; -// mod argmax_i8_1D_keepdims_false; -// mod argmax_i8_1D_last_index; -// mod argmax_i8_2D_default; -// mod argmax_i8_2D_keepdims_false; -// mod argmax_i8_2D_last_index; -// mod argmax_i8_3D_default; -// mod argmax_i8_3D_keepdims_false; -// mod argmax_i8_3D_last_index; -// mod argmax_u32_1D_default; -// mod argmax_u32_1D_keepdims_false; -// mod argmax_u32_1D_last_index; -// mod argmax_u32_2D_default; -// mod argmax_u32_2D_keepdims_false; -// mod argmax_u32_2D_last_index; -// mod argmax_u32_3D_default; -// mod argmax_u32_3D_keepdims_false; -// mod argmax_u32_3D_last_index; // mod argmin_fp16x16_1D_default; // mod argmin_fp16x16_1D_keepdims_false; // mod argmin_fp16x16_1D_last_index; @@ -1022,3 +977,11 @@ mod softmax_axis_0; mod softmax_axis_1; mod softmax_axis_2; mod softmax_axis_minus_1; +mod argmax_default_axes_keepdims; +mod argmax_default_axes_keepdims_select_last_index; +mod argmax_keepdims; +mod argmax_keepdims_select_last_index; +mod argmax_negative_axis_keepdims; +mod argmax_negative_axis_keepdims_select_last_index; +mod argmax_no_keepdims; +mod argmax_no_keepdims_select_last_index; diff --git a/tests/nodes/argmax_fp16x16_1D_last_index.cairo b/tests/nodes/argmax_default_axes_keepdims.cairo similarity index 56% rename 
from tests/nodes/argmax_fp16x16_1D_last_index.cairo rename to tests/nodes/argmax_default_axes_keepdims.cairo index fad13c40f..ba970e3b1 100644 --- a/tests/nodes/argmax_fp16x16_1D_last_index.cairo +++ b/tests/nodes/argmax_default_axes_keepdims.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_argmax_fp16x16_1D_last_index() { +fn test_argmax_default_axes_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); + let y_0 = input_0.argmax(0, Option::Some(true), Option::None(())); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/argmax_fp16x16_2D_keepdims_false/input_0.cairo b/tests/nodes/argmax_default_axes_keepdims/input_0.cairo similarity index 65% rename from tests/nodes/argmax_fp16x16_2D_keepdims_false/input_0.cairo rename to tests/nodes/argmax_default_axes_keepdims/input_0.cairo index 906f29fff..2bd0e9ded 100644 --- a/tests/nodes/argmax_fp16x16_2D_keepdims_false/input_0.cairo +++ b/tests/nodes/argmax_default_axes_keepdims/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 4521984, sign: false }); - data.append(FP16x16 { mag: 7667712, sign: true }); - data.append(FP16x16 { mag: 393216, sign: true }); - data.append(FP16x16 { 
mag: 6356992, sign: true }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 1, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_i32_2D_keepdims_false/input_0.cairo b/tests/nodes/argmax_default_axes_keepdims/output_0.cairo similarity index 69% rename from tests/nodes/argmax_i32_2D_keepdims_false/input_0.cairo rename to tests/nodes/argmax_default_axes_keepdims/output_0.cairo index 4b1340a16..83db954c1 100644 --- a/tests/nodes/argmax_i32_2D_keepdims_false/input_0.cairo +++ b/tests/nodes/argmax_default_axes_keepdims/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(2); + shape.append(1); shape.append(2); let mut data = ArrayTrait::new(); - data.append(15); - data.append(57); - data.append(-83); - data.append(13); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_default_axes_keepdims_select_last_index.cairo b/tests/nodes/argmax_default_axes_keepdims_select_last_index.cairo new file mode 100644 index 000000000..868a5f4a2 --- /dev/null +++ b/tests/nodes/argmax_default_axes_keepdims_select_last_index.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn 
test_argmax_default_axes_keepdims_select_last_index() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.argmax(0, Option::Some(true), Option::Some(true)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/argmax_fp16x16_2D_last_index/input_0.cairo b/tests/nodes/argmax_default_axes_keepdims_select_last_index/input_0.cairo similarity index 65% rename from tests/nodes/argmax_fp16x16_2D_last_index/input_0.cairo rename to tests/nodes/argmax_default_axes_keepdims_select_last_index/input_0.cairo index 708f630e3..c0673e50d 100644 --- a/tests/nodes/argmax_fp16x16_2D_last_index/input_0.cairo +++ b/tests/nodes/argmax_default_axes_keepdims_select_last_index/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 1310720, sign: true }); - data.append(FP16x16 { mag: 3670016, sign: true }); - data.append(FP16x16 { mag: 6422528, sign: true }); - data.append(FP16x16 { mag: 5636096, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_i32_2D_last_index/input_0.cairo b/tests/nodes/argmax_default_axes_keepdims_select_last_index/output_0.cairo similarity index 69% rename from tests/nodes/argmax_i32_2D_last_index/input_0.cairo rename to tests/nodes/argmax_default_axes_keepdims_select_last_index/output_0.cairo index f06f44db7..83db954c1 100644 --- a/tests/nodes/argmax_i32_2D_last_index/input_0.cairo +++ b/tests/nodes/argmax_default_axes_keepdims_select_last_index/output_0.cairo @@ -1,16 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn output_0() -> Tensor { let 
mut shape = ArrayTrait::::new(); - shape.append(2); + shape.append(1); shape.append(2); let mut data = ArrayTrait::new(); - data.append(-31); - data.append(-9); - data.append(-103); - data.append(-88); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_fp16x16_1D_default/input_0.cairo b/tests/nodes/argmax_fp16x16_1D_default/input_0.cairo deleted file mode 100644 index 1d26625fd..000000000 --- a/tests/nodes/argmax_fp16x16_1D_default/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 4194304, sign: true }); - data.append(FP16x16 { mag: 917504, sign: false }); - data.append(FP16x16 { mag: 5832704, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_1D_default/output_0.cairo b/tests/nodes/argmax_fp16x16_1D_default/output_0.cairo deleted file mode 100644 index 23846d97b..000000000 --- a/tests/nodes/argmax_fp16x16_1D_default/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_1D_keepdims_false/output_0.cairo b/tests/nodes/argmax_fp16x16_1D_keepdims_false/output_0.cairo deleted file mode 100644 index e4dbc507c..000000000 --- a/tests/nodes/argmax_fp16x16_1D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_1D_last_index/input_0.cairo b/tests/nodes/argmax_fp16x16_1D_last_index/input_0.cairo deleted file mode 100644 index 9f3f8fb9f..000000000 --- a/tests/nodes/argmax_fp16x16_1D_last_index/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 2359296, sign: true }); - data.append(FP16x16 { mag: 5701632, sign: false }); - data.append(FP16x16 { mag: 6684672, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_1D_last_index/output_0.cairo b/tests/nodes/argmax_fp16x16_1D_last_index/output_0.cairo deleted file mode 100644 index 23846d97b..000000000 --- a/tests/nodes/argmax_fp16x16_1D_last_index/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_2D_keepdims_false.cairo b/tests/nodes/argmax_fp16x16_2D_keepdims_false.cairo deleted file mode 100644 index 32fa5de64..000000000 --- a/tests/nodes/argmax_fp16x16_2D_keepdims_false.cairo 
+++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp16x16_2D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp16x16_2D_last_index.cairo b/tests/nodes/argmax_fp16x16_2D_last_index.cairo deleted file mode 100644 index 3c0061e3e..000000000 --- a/tests/nodes/argmax_fp16x16_2D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp16x16_2D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp16x16_2D_last_index/output_0.cairo b/tests/nodes/argmax_fp16x16_2D_last_index/output_0.cairo deleted file mode 100644 index a550af121..000000000 --- a/tests/nodes/argmax_fp16x16_2D_last_index/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, 
U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_3D_default/input_0.cairo b/tests/nodes/argmax_fp16x16_3D_default/input_0.cairo deleted file mode 100644 index d2f4ba6a0..000000000 --- a/tests/nodes/argmax_fp16x16_3D_default/input_0.cairo +++ /dev/null @@ -1,22 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 720896, sign: true }); - data.append(FP16x16 { mag: 4390912, sign: false }); - data.append(FP16x16 { mag: 5111808, sign: true }); - data.append(FP16x16 { mag: 6553600, sign: false }); - data.append(FP16x16 { mag: 1245184, sign: false }); - data.append(FP16x16 { mag: 6094848, sign: true }); - data.append(FP16x16 { mag: 4718592, sign: false }); - data.append(FP16x16 { mag: 3211264, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_3D_keepdims_false.cairo b/tests/nodes/argmax_fp16x16_3D_keepdims_false.cairo deleted file mode 100644 index dfd529ffb..000000000 --- a/tests/nodes/argmax_fp16x16_3D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, 
FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp16x16_3D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp16x16_3D_keepdims_false/input_0.cairo b/tests/nodes/argmax_fp16x16_3D_keepdims_false/input_0.cairo deleted file mode 100644 index 392ed4928..000000000 --- a/tests/nodes/argmax_fp16x16_3D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,22 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 1572864, sign: false }); - data.append(FP16x16 { mag: 2424832, sign: true }); - data.append(FP16x16 { mag: 6422528, sign: false }); - data.append(FP16x16 { mag: 5242880, sign: false }); - data.append(FP16x16 { mag: 1703936, sign: true }); - data.append(FP16x16 { mag: 6291456, sign: false }); - data.append(FP16x16 { mag: 589824, sign: true }); - data.append(FP16x16 { mag: 7733248, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_3D_keepdims_false/output_0.cairo b/tests/nodes/argmax_fp16x16_3D_keepdims_false/output_0.cairo deleted file mode 100644 index 825125d8a..000000000 --- a/tests/nodes/argmax_fp16x16_3D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - 
data.append(1); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_3D_last_index.cairo b/tests/nodes/argmax_fp16x16_3D_last_index.cairo deleted file mode 100644 index 72b0e2642..000000000 --- a/tests/nodes/argmax_fp16x16_3D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp16x16_3D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp16x16_3D_last_index/input_0.cairo b/tests/nodes/argmax_fp16x16_3D_last_index/input_0.cairo deleted file mode 100644 index 50ac50fc8..000000000 --- a/tests/nodes/argmax_fp16x16_3D_last_index/input_0.cairo +++ /dev/null @@ -1,22 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::numbers::{FixedTrait, FP16x16}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 6291456, sign: true }); - data.append(FP16x16 { mag: 7077888, sign: true }); - data.append(FP16x16 { mag: 1966080, sign: false }); - data.append(FP16x16 { mag: 4063232, sign: false }); - data.append(FP16x16 { mag: 5439488, sign: false }); - data.append(FP16x16 { mag: 1114112, sign: true }); - data.append(FP16x16 { mag: 1441792, sign: 
false }); - data.append(FP16x16 { mag: 458752, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_3D_last_index/output_0.cairo b/tests/nodes/argmax_fp16x16_3D_last_index/output_0.cairo deleted file mode 100644 index 9c34c6139..000000000 --- a/tests/nodes/argmax_fp16x16_3D_last_index/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_1D_default.cairo b/tests/nodes/argmax_fp8x23_1D_default.cairo deleted file mode 100644 index 022e60b2f..000000000 --- a/tests/nodes/argmax_fp8x23_1D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_1D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_1D_default/input_0.cairo b/tests/nodes/argmax_fp8x23_1D_default/input_0.cairo deleted file mode 100644 index 96ca7af0a..000000000 --- a/tests/nodes/argmax_fp8x23_1D_default/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; 
-use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 1040187392, sign: false }); - data.append(FP8x23 { mag: 637534208, sign: true }); - data.append(FP8x23 { mag: 830472192, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_1D_default/output_0.cairo b/tests/nodes/argmax_fp8x23_1D_default/output_0.cairo deleted file mode 100644 index e4dbc507c..000000000 --- a/tests/nodes/argmax_fp8x23_1D_default/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_1D_keepdims_false.cairo b/tests/nodes/argmax_fp8x23_1D_keepdims_false.cairo deleted file mode 100644 index cb6cea94f..000000000 --- a/tests/nodes/argmax_fp8x23_1D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_1D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - 
assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_1D_keepdims_false/input_0.cairo b/tests/nodes/argmax_fp8x23_1D_keepdims_false/input_0.cairo deleted file mode 100644 index 35f085f6e..000000000 --- a/tests/nodes/argmax_fp8x23_1D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 402653184, sign: false }); - data.append(FP8x23 { mag: 905969664, sign: true }); - data.append(FP8x23 { mag: 822083584, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_1D_keepdims_false/output_0.cairo b/tests/nodes/argmax_fp8x23_1D_keepdims_false/output_0.cairo deleted file mode 100644 index 23846d97b..000000000 --- a/tests/nodes/argmax_fp8x23_1D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_1D_last_index.cairo b/tests/nodes/argmax_fp8x23_1D_last_index.cairo deleted file mode 100644 index e2a7f0985..000000000 --- a/tests/nodes/argmax_fp8x23_1D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use 
orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_1D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_1D_last_index/input_0.cairo b/tests/nodes/argmax_fp8x23_1D_last_index/input_0.cairo deleted file mode 100644 index 7613acbd4..000000000 --- a/tests/nodes/argmax_fp8x23_1D_last_index/input_0.cairo +++ /dev/null @@ -1,15 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 369098752, sign: false }); - data.append(FP8x23 { mag: 847249408, sign: true }); - data.append(FP8x23 { mag: 612368384, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_1D_last_index/output_0.cairo b/tests/nodes/argmax_fp8x23_1D_last_index/output_0.cairo deleted file mode 100644 index 23846d97b..000000000 --- a/tests/nodes/argmax_fp8x23_1D_last_index/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_2D_default.cairo b/tests/nodes/argmax_fp8x23_2D_default.cairo deleted file mode 100644 index 0e474f982..000000000 --- a/tests/nodes/argmax_fp8x23_2D_default.cairo +++ /dev/null @@ 
-1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_2D_default/input_0.cairo b/tests/nodes/argmax_fp8x23_2D_default/input_0.cairo deleted file mode 100644 index d3ddea114..000000000 --- a/tests/nodes/argmax_fp8x23_2D_default/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 654311424, sign: true }); - data.append(FP8x23 { mag: 469762048, sign: false }); - data.append(FP8x23 { mag: 964689920, sign: true }); - data.append(FP8x23 { mag: 662700032, sign: false }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_2D_keepdims_false.cairo b/tests/nodes/argmax_fp8x23_2D_keepdims_false.cairo deleted file mode 100644 index 40fe14c02..000000000 --- a/tests/nodes/argmax_fp8x23_2D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, 
assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_2D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_2D_keepdims_false/input_0.cairo b/tests/nodes/argmax_fp8x23_2D_keepdims_false/input_0.cairo deleted file mode 100644 index efb3441d3..000000000 --- a/tests/nodes/argmax_fp8x23_2D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 536870912, sign: true }); - data.append(FP8x23 { mag: 469762048, sign: false }); - data.append(FP8x23 { mag: 83886080, sign: true }); - data.append(FP8x23 { mag: 620756992, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_2D_keepdims_false/output_0.cairo b/tests/nodes/argmax_fp8x23_2D_keepdims_false/output_0.cairo deleted file mode 100644 index edc68637b..000000000 --- a/tests/nodes/argmax_fp8x23_2D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git 
a/tests/nodes/argmax_fp8x23_2D_last_index.cairo b/tests/nodes/argmax_fp8x23_2D_last_index.cairo deleted file mode 100644 index 05510e4f2..000000000 --- a/tests/nodes/argmax_fp8x23_2D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_2D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_2D_last_index/input_0.cairo b/tests/nodes/argmax_fp8x23_2D_last_index/input_0.cairo deleted file mode 100644 index be2e02d4c..000000000 --- a/tests/nodes/argmax_fp8x23_2D_last_index/input_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 335544320, sign: false }); - data.append(FP8x23 { mag: 1031798784, sign: true }); - data.append(FP8x23 { mag: 989855744, sign: true }); - data.append(FP8x23 { mag: 813694976, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_2D_last_index/output_0.cairo b/tests/nodes/argmax_fp8x23_2D_last_index/output_0.cairo deleted file mode 100644 index a550af121..000000000 --- a/tests/nodes/argmax_fp8x23_2D_last_index/output_0.cairo +++ 
/dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_3D_default.cairo b/tests/nodes/argmax_fp8x23_3D_default.cairo deleted file mode 100644 index 4ca1b45c4..000000000 --- a/tests/nodes/argmax_fp8x23_3D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_3D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_3D_default/input_0.cairo b/tests/nodes/argmax_fp8x23_3D_default/input_0.cairo deleted file mode 100644 index 4512a7d3c..000000000 --- a/tests/nodes/argmax_fp8x23_3D_default/input_0.cairo +++ /dev/null @@ -1,22 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 612368384, sign: false }); - data.append(FP8x23 { mag: 
578813952, sign: false }); - data.append(FP8x23 { mag: 947912704, sign: false }); - data.append(FP8x23 { mag: 201326592, sign: true }); - data.append(FP8x23 { mag: 1031798784, sign: true }); - data.append(FP8x23 { mag: 729808896, sign: false }); - data.append(FP8x23 { mag: 922746880, sign: false }); - data.append(FP8x23 { mag: 33554432, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_3D_default/output_0.cairo b/tests/nodes/argmax_fp8x23_3D_default/output_0.cairo deleted file mode 100644 index 07cf1b47e..000000000 --- a/tests/nodes/argmax_fp8x23_3D_default/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_3D_keepdims_false.cairo b/tests/nodes/argmax_fp8x23_3D_keepdims_false.cairo deleted file mode 100644 index 4d5a5b850..000000000 --- a/tests/nodes/argmax_fp8x23_3D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_3D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - 
assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_3D_keepdims_false/input_0.cairo b/tests/nodes/argmax_fp8x23_3D_keepdims_false/input_0.cairo deleted file mode 100644 index a7ac816c9..000000000 --- a/tests/nodes/argmax_fp8x23_3D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,22 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 444596224, sign: false }); - data.append(FP8x23 { mag: 369098752, sign: false }); - data.append(FP8x23 { mag: 1056964608, sign: false }); - data.append(FP8x23 { mag: 234881024, sign: false }); - data.append(FP8x23 { mag: 159383552, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 897581056, sign: true }); - data.append(FP8x23 { mag: 327155712, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_3D_keepdims_false/output_0.cairo b/tests/nodes/argmax_fp8x23_3D_keepdims_false/output_0.cairo deleted file mode 100644 index 637e10c01..000000000 --- a/tests/nodes/argmax_fp8x23_3D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_3D_last_index.cairo b/tests/nodes/argmax_fp8x23_3D_last_index.cairo deleted file mode 100644 index 
856fbd988..000000000 --- a/tests/nodes/argmax_fp8x23_3D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::FP8x23TensorPartialEq; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_fp8x23_3D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_fp8x23_3D_last_index/input_0.cairo b/tests/nodes/argmax_fp8x23_3D_last_index/input_0.cairo deleted file mode 100644 index 42b2e25d0..000000000 --- a/tests/nodes/argmax_fp8x23_3D_last_index/input_0.cairo +++ /dev/null @@ -1,22 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::numbers::{FixedTrait, FP8x23}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 83886080, sign: false }); - data.append(FP8x23 { mag: 729808896, sign: true }); - data.append(FP8x23 { mag: 536870912, sign: true }); - data.append(FP8x23 { mag: 905969664, sign: false }); - data.append(FP8x23 { mag: 75497472, sign: true }); - data.append(FP8x23 { mag: 377487360, sign: true }); - data.append(FP8x23 { mag: 92274688, sign: true }); - data.append(FP8x23 { mag: 209715200, sign: true }); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp8x23_3D_last_index/output_0.cairo b/tests/nodes/argmax_fp8x23_3D_last_index/output_0.cairo deleted 
file mode 100644 index e35cd05a2..000000000 --- a/tests/nodes/argmax_fp8x23_3D_last_index/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_1D_default.cairo b/tests/nodes/argmax_i32_1D_default.cairo deleted file mode 100644 index 7846917bd..000000000 --- a/tests/nodes/argmax_i32_1D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_1D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_1D_default/output_0.cairo b/tests/nodes/argmax_i32_1D_default/output_0.cairo deleted file mode 100644 index 23846d97b..000000000 --- a/tests/nodes/argmax_i32_1D_default/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(2); - 
TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_1D_keepdims_false.cairo b/tests/nodes/argmax_i32_1D_keepdims_false.cairo deleted file mode 100644 index 9649435c7..000000000 --- a/tests/nodes/argmax_i32_1D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_1D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_1D_keepdims_false/input_0.cairo b/tests/nodes/argmax_i32_1D_keepdims_false/input_0.cairo deleted file mode 100644 index 7a2414371..000000000 --- a/tests/nodes/argmax_i32_1D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(-69); - data.append(51); - data.append(-49); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_1D_keepdims_false/output_0.cairo b/tests/nodes/argmax_i32_1D_keepdims_false/output_0.cairo deleted file mode 100644 index f59edfe99..000000000 --- a/tests/nodes/argmax_i32_1D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, 
U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_1D_last_index.cairo b/tests/nodes/argmax_i32_1D_last_index.cairo deleted file mode 100644 index 94f533be6..000000000 --- a/tests/nodes/argmax_i32_1D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_1D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_1D_last_index/input_0.cairo b/tests/nodes/argmax_i32_1D_last_index/input_0.cairo deleted file mode 100644 index fa967cdc9..000000000 --- a/tests/nodes/argmax_i32_1D_last_index/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(135); - data.append(148); - data.append(67); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_1D_last_index/output_0.cairo b/tests/nodes/argmax_i32_1D_last_index/output_0.cairo deleted file mode 100644 index f59edfe99..000000000 --- a/tests/nodes/argmax_i32_1D_last_index/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_2D_default.cairo b/tests/nodes/argmax_i32_2D_default.cairo deleted file mode 100644 index 1d8e4b3f7..000000000 --- a/tests/nodes/argmax_i32_2D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_2D_default/output_0.cairo b/tests/nodes/argmax_i32_2D_default/output_0.cairo deleted file mode 100644 index e072284af..000000000 --- a/tests/nodes/argmax_i32_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_2D_keepdims_false.cairo b/tests/nodes/argmax_i32_2D_keepdims_false.cairo deleted file mode 100644 index 6e5372c2b..000000000 --- 
a/tests/nodes/argmax_i32_2D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_2D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_2D_keepdims_false/output_0.cairo b/tests/nodes/argmax_i32_2D_keepdims_false/output_0.cairo deleted file mode 100644 index 4870d2de9..000000000 --- a/tests/nodes/argmax_i32_2D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_2D_last_index.cairo b/tests/nodes/argmax_i32_2D_last_index.cairo deleted file mode 100644 index 5d734d434..000000000 --- a/tests/nodes/argmax_i32_2D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] 
-#[available_gas(2000000000)] -fn test_argmax_i32_2D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_2D_last_index/output_0.cairo b/tests/nodes/argmax_i32_2D_last_index/output_0.cairo deleted file mode 100644 index e072284af..000000000 --- a/tests/nodes/argmax_i32_2D_last_index/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_3D_default.cairo b/tests/nodes/argmax_i32_3D_default.cairo deleted file mode 100644 index 0f1088869..000000000 --- a/tests/nodes/argmax_i32_3D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_3D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_3D_default/input_0.cairo b/tests/nodes/argmax_i32_3D_default/input_0.cairo deleted file mode 100644 index 142afb3c3..000000000 --- a/tests/nodes/argmax_i32_3D_default/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use 
core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(-56); - data.append(46); - data.append(-48); - data.append(91); - data.append(-70); - data.append(-54); - data.append(96); - data.append(122); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_3D_default/output_0.cairo b/tests/nodes/argmax_i32_3D_default/output_0.cairo deleted file mode 100644 index a4cc4f838..000000000 --- a/tests/nodes/argmax_i32_3D_default/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_3D_keepdims_false.cairo b/tests/nodes/argmax_i32_3D_keepdims_false.cairo deleted file mode 100644 index 8aac46a2c..000000000 --- a/tests/nodes/argmax_i32_3D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_3D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); 
- - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_3D_keepdims_false/input_0.cairo b/tests/nodes/argmax_i32_3D_keepdims_false/input_0.cairo deleted file mode 100644 index 5a61a206a..000000000 --- a/tests/nodes/argmax_i32_3D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(-121); - data.append(-114); - data.append(56); - data.append(42); - data.append(79); - data.append(43); - data.append(126); - data.append(-64); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_3D_keepdims_false/output_0.cairo b/tests/nodes/argmax_i32_3D_keepdims_false/output_0.cairo deleted file mode 100644 index 1d637338a..000000000 --- a/tests/nodes/argmax_i32_3D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_3D_last_index.cairo b/tests/nodes/argmax_i32_3D_last_index.cairo deleted file mode 100644 index 81f4997de..000000000 --- a/tests/nodes/argmax_i32_3D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; 
-use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::I32TensorPartialEq; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i32_3D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i32_3D_last_index/input_0.cairo b/tests/nodes/argmax_i32_3D_last_index/input_0.cairo deleted file mode 100644 index c5d8bc74e..000000000 --- a/tests/nodes/argmax_i32_3D_last_index/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(98); - data.append(89); - data.append(-126); - data.append(-68); - data.append(31); - data.append(7); - data.append(-86); - data.append(99); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i32_3D_last_index/output_0.cairo b/tests/nodes/argmax_i32_3D_last_index/output_0.cairo deleted file mode 100644 index a4cc4f838..000000000 --- a/tests/nodes/argmax_i32_3D_last_index/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_1D_default.cairo 
b/tests/nodes/argmax_i8_1D_default.cairo deleted file mode 100644 index 5fd40f6b5..000000000 --- a/tests/nodes/argmax_i8_1D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_1D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_1D_default/input_0.cairo b/tests/nodes/argmax_i8_1D_default/input_0.cairo deleted file mode 100644 index d760a1faa..000000000 --- a/tests/nodes/argmax_i8_1D_default/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(6); - data.append(-60); - data.append(-123); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_1D_default/output_0.cairo b/tests/nodes/argmax_i8_1D_default/output_0.cairo deleted file mode 100644 index e4dbc507c..000000000 --- a/tests/nodes/argmax_i8_1D_default/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - 
TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_1D_keepdims_false.cairo b/tests/nodes/argmax_i8_1D_keepdims_false.cairo deleted file mode 100644 index 9fa8ef88c..000000000 --- a/tests/nodes/argmax_i8_1D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_1D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_1D_keepdims_false/input_0.cairo b/tests/nodes/argmax_i8_1D_keepdims_false/input_0.cairo deleted file mode 100644 index fdfdf6ff4..000000000 --- a/tests/nodes/argmax_i8_1D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(-68); - data.append(56); - data.append(-126); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_1D_keepdims_false/output_0.cairo b/tests/nodes/argmax_i8_1D_keepdims_false/output_0.cairo deleted file mode 100644 index f59edfe99..000000000 --- a/tests/nodes/argmax_i8_1D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, 
U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_1D_last_index.cairo b/tests/nodes/argmax_i8_1D_last_index.cairo deleted file mode 100644 index 54ccb82f9..000000000 --- a/tests/nodes/argmax_i8_1D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_1D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_1D_last_index/input_0.cairo b/tests/nodes/argmax_i8_1D_last_index/input_0.cairo deleted file mode 100644 index 262a8306c..000000000 --- a/tests/nodes/argmax_i8_1D_last_index/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(111); - data.append(84); - data.append(-120); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_1D_last_index/output_0.cairo b/tests/nodes/argmax_i8_1D_last_index/output_0.cairo deleted file mode 100644 index e4dbc507c..000000000 --- a/tests/nodes/argmax_i8_1D_last_index/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_2D_default.cairo b/tests/nodes/argmax_i8_2D_default.cairo deleted file mode 100644 index dc0a71e4e..000000000 --- a/tests/nodes/argmax_i8_2D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_2D_default/input_0.cairo b/tests/nodes/argmax_i8_2D_default/input_0.cairo deleted file mode 100644 index 9aa9650c7..000000000 --- a/tests/nodes/argmax_i8_2D_default/input_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(-8); - data.append(-67); - data.append(-22); - data.append(99); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_2D_default/output_0.cairo b/tests/nodes/argmax_i8_2D_default/output_0.cairo deleted file mode 100644 index a550af121..000000000 --- 
a/tests/nodes/argmax_i8_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_2D_keepdims_false.cairo b/tests/nodes/argmax_i8_2D_keepdims_false.cairo deleted file mode 100644 index a56d67f01..000000000 --- a/tests/nodes/argmax_i8_2D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_2D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_2D_keepdims_false/input_0.cairo b/tests/nodes/argmax_i8_2D_keepdims_false/input_0.cairo deleted file mode 100644 index 373558d6e..000000000 --- a/tests/nodes/argmax_i8_2D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(25); - data.append(-120); - data.append(-2); - data.append(105); - 
TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_2D_keepdims_false/output_0.cairo b/tests/nodes/argmax_i8_2D_keepdims_false/output_0.cairo deleted file mode 100644 index 2fe75c127..000000000 --- a/tests/nodes/argmax_i8_2D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_2D_last_index.cairo b/tests/nodes/argmax_i8_2D_last_index.cairo deleted file mode 100644 index 1574a72f7..000000000 --- a/tests/nodes/argmax_i8_2D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_2D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_2D_last_index/input_0.cairo b/tests/nodes/argmax_i8_2D_last_index/input_0.cairo deleted file mode 100644 index 051786fb8..000000000 --- a/tests/nodes/argmax_i8_2D_last_index/input_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = 
ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(-73); - data.append(111); - data.append(47); - data.append(-18); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_2D_last_index/output_0.cairo b/tests/nodes/argmax_i8_2D_last_index/output_0.cairo deleted file mode 100644 index 221e8a246..000000000 --- a/tests/nodes/argmax_i8_2D_last_index/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_3D_default.cairo b/tests/nodes/argmax_i8_3D_default.cairo deleted file mode 100644 index eaf92eae5..000000000 --- a/tests/nodes/argmax_i8_3D_default.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_3D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_3D_default/input_0.cairo b/tests/nodes/argmax_i8_3D_default/input_0.cairo deleted file mode 100644 index cc96b52ca..000000000 --- a/tests/nodes/argmax_i8_3D_default/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, 
SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(66); - data.append(-56); - data.append(49); - data.append(-2); - data.append(-93); - data.append(-55); - data.append(115); - data.append(28); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_3D_default/output_0.cairo b/tests/nodes/argmax_i8_3D_default/output_0.cairo deleted file mode 100644 index 56d0d331d..000000000 --- a/tests/nodes/argmax_i8_3D_default/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_3D_keepdims_false.cairo b/tests/nodes/argmax_i8_3D_keepdims_false.cairo deleted file mode 100644 index 3b3f3c10c..000000000 --- a/tests/nodes/argmax_i8_3D_keepdims_false.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_3D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, 
Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_3D_keepdims_false/input_0.cairo b/tests/nodes/argmax_i8_3D_keepdims_false/input_0.cairo deleted file mode 100644 index f6de84eca..000000000 --- a/tests/nodes/argmax_i8_3D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(82); - data.append(70); - data.append(-18); - data.append(-46); - data.append(-4); - data.append(82); - data.append(58); - data.append(-41); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_3D_keepdims_false/output_0.cairo b/tests/nodes/argmax_i8_3D_keepdims_false/output_0.cairo deleted file mode 100644 index f13678224..000000000 --- a/tests/nodes/argmax_i8_3D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(1); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_3D_last_index.cairo b/tests/nodes/argmax_i8_3D_last_index.cairo deleted file mode 100644 index cd460d7c2..000000000 --- a/tests/nodes/argmax_i8_3D_last_index.cairo +++ /dev/null @@ -1,22 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::{U32Tensor, 
U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_i8_3D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_i8_3D_last_index/input_0.cairo b/tests/nodes/argmax_i8_3D_last_index/input_0.cairo deleted file mode 100644 index a665d1534..000000000 --- a/tests/nodes/argmax_i8_3D_last_index/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(-7); - data.append(-94); - data.append(-64); - data.append(-19); - data.append(59); - data.append(-40); - data.append(99); - data.append(38); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_i8_3D_last_index/output_0.cairo b/tests/nodes/argmax_i8_3D_last_index/output_0.cairo deleted file mode 100644 index 23b7ccc3c..000000000 --- a/tests/nodes/argmax_i8_3D_last_index/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(1); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_fp16x16_1D_default.cairo b/tests/nodes/argmax_keepdims.cairo similarity index 58% rename 
from tests/nodes/argmax_fp16x16_1D_default.cairo rename to tests/nodes/argmax_keepdims.cairo index 873579cce..49f6f5d7b 100644 --- a/tests/nodes/argmax_fp16x16_1D_default.cairo +++ b/tests/nodes/argmax_keepdims.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_argmax_fp16x16_1D_default() { +fn test_argmax_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.argmax(0, Option::None(()), Option::None(())); + let y_0 = input_0.argmax(1, Option::Some(true), Option::None(())); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/argmax_fp16x16_2D_default/input_0.cairo b/tests/nodes/argmax_keepdims/input_0.cairo similarity index 65% rename from tests/nodes/argmax_fp16x16_2D_default/input_0.cairo rename to tests/nodes/argmax_keepdims/input_0.cairo index ce1b11cae..2bd0e9ded 100644 --- a/tests/nodes/argmax_fp16x16_2D_default/input_0.cairo +++ b/tests/nodes/argmax_keepdims/input_0.cairo @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 2162688, sign: false }); - data.append(FP16x16 { mag: 1507328, sign: true }); - data.append(FP16x16 { mag: 6815744, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 1, 
sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_fp8x23_2D_default/output_0.cairo b/tests/nodes/argmax_keepdims/output_0.cairo similarity index 72% rename from tests/nodes/argmax_fp8x23_2D_default/output_0.cairo rename to tests/nodes/argmax_keepdims/output_0.cairo index a550af121..a1ae6cd36 100644 --- a/tests/nodes/argmax_fp8x23_2D_default/output_0.cairo +++ b/tests/nodes/argmax_keepdims/output_0.cairo @@ -1,11 +1,12 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(1); shape.append(2); + shape.append(1); let mut data = ArrayTrait::new(); data.append(0); diff --git a/tests/nodes/argmax_fp16x16_1D_keepdims_false.cairo b/tests/nodes/argmax_keepdims_select_last_index.cairo similarity index 56% rename from tests/nodes/argmax_fp16x16_1D_keepdims_false.cairo rename to tests/nodes/argmax_keepdims_select_last_index.cairo index 1563fad06..234cb0057 100644 --- a/tests/nodes/argmax_fp16x16_1D_keepdims_false.cairo +++ b/tests/nodes/argmax_keepdims_select_last_index.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use 
orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_argmax_fp16x16_1D_keepdims_false() { +fn test_argmax_keepdims_select_last_index() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.argmax(0, Option::Some(false), Option::None(())); + let y_0 = input_0.argmax(1, Option::Some(true), Option::Some(true)); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/argmax_fp16x16_1D_keepdims_false/input_0.cairo b/tests/nodes/argmax_keepdims_select_last_index/input_0.cairo similarity index 60% rename from tests/nodes/argmax_fp16x16_1D_keepdims_false/input_0.cairo rename to tests/nodes/argmax_keepdims_select_last_index/input_0.cairo index 1c4977212..c0673e50d 100644 --- a/tests/nodes/argmax_fp16x16_1D_keepdims_false/input_0.cairo +++ b/tests/nodes/argmax_keepdims_select_last_index/input_0.cairo @@ -5,11 +5,13 @@ use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); + shape.append(2); + shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 6422528, sign: false }); - data.append(FP16x16 { mag: 2031616, sign: false }); - data.append(FP16x16 { mag: 6356992, sign: true }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_i32_2D_default/input_0.cairo b/tests/nodes/argmax_keepdims_select_last_index/output_0.cairo similarity index 68% rename from tests/nodes/argmax_i32_2D_default/input_0.cairo rename to tests/nodes/argmax_keepdims_select_last_index/output_0.cairo index 92434e2c9..241b14b08 100644 --- a/tests/nodes/argmax_i32_2D_default/input_0.cairo +++ b/tests/nodes/argmax_keepdims_select_last_index/output_0.cairo @@ -1,16 +1,15 @@ use 
core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); - shape.append(2); + shape.append(1); let mut data = ArrayTrait::new(); - data.append(-27); - data.append(114); - data.append(-54); - data.append(-100); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_fp16x16_2D_default.cairo b/tests/nodes/argmax_negative_axis_keepdims.cairo similarity index 56% rename from tests/nodes/argmax_fp16x16_2D_default.cairo rename to tests/nodes/argmax_negative_axis_keepdims.cairo index 754dd2119..85eb2619f 100644 --- a/tests/nodes/argmax_fp16x16_2D_default.cairo +++ b/tests/nodes/argmax_negative_axis_keepdims.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_argmax_fp16x16_2D_default() { +fn test_argmax_negative_axis_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.argmax(0, Option::None(()), Option::None(())); + let y_0 = input_0.argmax(-1, Option::Some(true), Option::None(())); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/argmax_negative_axis_keepdims/input_0.cairo 
b/tests/nodes/argmax_negative_axis_keepdims/input_0.cairo new file mode 100644 index 000000000..2bd0e9ded --- /dev/null +++ b/tests/nodes/argmax_negative_axis_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 1, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/argmax_fp16x16_2D_default/output_0.cairo b/tests/nodes/argmax_negative_axis_keepdims/output_0.cairo similarity index 72% rename from tests/nodes/argmax_fp16x16_2D_default/output_0.cairo rename to tests/nodes/argmax_negative_axis_keepdims/output_0.cairo index a550af121..a1ae6cd36 100644 --- a/tests/nodes/argmax_fp16x16_2D_default/output_0.cairo +++ b/tests/nodes/argmax_negative_axis_keepdims/output_0.cairo @@ -1,11 +1,12 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(1); shape.append(2); + shape.append(1); let mut data = ArrayTrait::new(); data.append(0); diff --git a/tests/nodes/argmax_negative_axis_keepdims_select_last_index.cairo b/tests/nodes/argmax_negative_axis_keepdims_select_last_index.cairo new file mode 100644 index 000000000..92c5f9f10 --- /dev/null +++ b/tests/nodes/argmax_negative_axis_keepdims_select_last_index.cairo @@ -0,0 +1,22 @@ +mod input_0; 
+mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_argmax_negative_axis_keepdims_select_last_index() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.argmax(-1, Option::Some(true), Option::Some(true)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/argmax_negative_axis_keepdims_select_last_index/input_0.cairo b/tests/nodes/argmax_negative_axis_keepdims_select_last_index/input_0.cairo new file mode 100644 index 000000000..c0673e50d --- /dev/null +++ b/tests/nodes/argmax_negative_axis_keepdims_select_last_index/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/argmax_fp16x16_3D_default/output_0.cairo b/tests/nodes/argmax_negative_axis_keepdims_select_last_index/output_0.cairo similarity index 67% rename from tests/nodes/argmax_fp16x16_3D_default/output_0.cairo rename to tests/nodes/argmax_negative_axis_keepdims_select_last_index/output_0.cairo index 03945682b..241b14b08 100644 --- a/tests/nodes/argmax_fp16x16_3D_default/output_0.cairo +++ 
b/tests/nodes/argmax_negative_axis_keepdims_select_last_index/output_0.cairo @@ -1,17 +1,15 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); shape.append(2); + shape.append(1); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); data.append(1); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_fp16x16_3D_default.cairo b/tests/nodes/argmax_no_keepdims.cairo similarity index 57% rename from tests/nodes/argmax_fp16x16_3D_default.cairo rename to tests/nodes/argmax_no_keepdims.cairo index b7efa083b..c278fee35 100644 --- a/tests/nodes/argmax_fp16x16_3D_default.cairo +++ b/tests/nodes/argmax_no_keepdims.cairo @@ -2,21 +2,21 @@ mod input_0; mod output_0; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; #[test] #[available_gas(2000000000)] -fn test_argmax_fp16x16_3D_default() { +fn test_argmax_no_keepdims() { let input_0 = input_0::input_0(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.argmax(0, Option::None(()), Option::None(())); + let y_0 = input_0.argmax(1, Option::Some(false), Option::None(())); - assert_eq(y, z); + 
assert_eq(y_0, z_0); } diff --git a/tests/nodes/argmax_no_keepdims/input_0.cairo b/tests/nodes/argmax_no_keepdims/input_0.cairo new file mode 100644 index 000000000..2bd0e9ded --- /dev/null +++ b/tests/nodes/argmax_no_keepdims/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 1, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/argmax_fp16x16_2D_keepdims_false/output_0.cairo b/tests/nodes/argmax_no_keepdims/output_0.cairo similarity index 70% rename from tests/nodes/argmax_fp16x16_2D_keepdims_false/output_0.cairo rename to tests/nodes/argmax_no_keepdims/output_0.cairo index 2fe75c127..ed50e53b0 100644 --- a/tests/nodes/argmax_fp16x16_2D_keepdims_false/output_0.cairo +++ b/tests/nodes/argmax_no_keepdims/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); diff --git a/tests/nodes/argmax_no_keepdims_select_last_index.cairo b/tests/nodes/argmax_no_keepdims_select_last_index.cairo new file mode 100644 index 000000000..5233204eb --- /dev/null +++ b/tests/nodes/argmax_no_keepdims_select_last_index.cairo @@ -0,0 +1,22 @@ +mod input_0; +mod output_0; + + +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; 
+use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_argmax_no_keepdims_select_last_index() { + let input_0 = input_0::input_0(); + let z_0 = output_0::output_0(); + + let y_0 = input_0.argmax(1, Option::Some(false), Option::Some(true)); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/argmax_no_keepdims_select_last_index/input_0.cairo b/tests/nodes/argmax_no_keepdims_select_last_index/input_0.cairo new file mode 100644 index 000000000..c0673e50d --- /dev/null +++ b/tests/nodes/argmax_no_keepdims_select_last_index/input_0.cairo @@ -0,0 +1,17 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 2, sign: false }); + data.append(FP16x16 { mag: 3, sign: false }); + data.append(FP16x16 { mag: 10, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/argmax_i32_1D_default/input_0.cairo b/tests/nodes/argmax_no_keepdims_select_last_index/output_0.cairo similarity index 69% rename from tests/nodes/argmax_i32_1D_default/input_0.cairo rename to tests/nodes/argmax_no_keepdims_select_last_index/output_0.cairo index 97fbd451a..29a921caf 100644 --- a/tests/nodes/argmax_i32_1D_default/input_0.cairo +++ b/tests/nodes/argmax_no_keepdims_select_last_index/output_0.cairo @@ -1,14 +1,14 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn input_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); - shape.append(3); + shape.append(2); let mut data = ArrayTrait::new(); - data.append(-54); - data.append(29); - data.append(81); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/argmax_u32_1D_default.cairo b/tests/nodes/argmax_u32_1D_default.cairo deleted file mode 100644 index f8bf64a5b..000000000 --- a/tests/nodes/argmax_u32_1D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_1D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_1D_default/input_0.cairo b/tests/nodes/argmax_u32_1D_default/input_0.cairo deleted file mode 100644 index d02f0a9a5..000000000 --- a/tests/nodes/argmax_u32_1D_default/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(244); - data.append(135); - data.append(53); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_1D_default/output_0.cairo b/tests/nodes/argmax_u32_1D_default/output_0.cairo deleted file mode 100644 index e4dbc507c..000000000 --- 
a/tests/nodes/argmax_u32_1D_default/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_1D_keepdims_false.cairo b/tests/nodes/argmax_u32_1D_keepdims_false.cairo deleted file mode 100644 index aa6555d1c..000000000 --- a/tests/nodes/argmax_u32_1D_keepdims_false.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_1D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_1D_keepdims_false/input_0.cairo b/tests/nodes/argmax_u32_1D_keepdims_false/input_0.cairo deleted file mode 100644 index 161945ea0..000000000 --- a/tests/nodes/argmax_u32_1D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(89); - data.append(33); - data.append(140); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_1D_keepdims_false/output_0.cairo b/tests/nodes/argmax_u32_1D_keepdims_false/output_0.cairo deleted file 
mode 100644 index 23846d97b..000000000 --- a/tests/nodes/argmax_u32_1D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(2); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_1D_last_index.cairo b/tests/nodes/argmax_u32_1D_last_index.cairo deleted file mode 100644 index e6ad1f332..000000000 --- a/tests/nodes/argmax_u32_1D_last_index.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_1D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_1D_last_index/input_0.cairo b/tests/nodes/argmax_u32_1D_last_index/input_0.cairo deleted file mode 100644 index caad0d75f..000000000 --- a/tests/nodes/argmax_u32_1D_last_index/input_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(3); - - let mut data = ArrayTrait::new(); - data.append(63); - data.append(40); - data.append(22); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_1D_last_index/output_0.cairo b/tests/nodes/argmax_u32_1D_last_index/output_0.cairo 
deleted file mode 100644 index e4dbc507c..000000000 --- a/tests/nodes/argmax_u32_1D_last_index/output_0.cairo +++ /dev/null @@ -1,12 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - - let mut data = ArrayTrait::new(); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_2D_default.cairo b/tests/nodes/argmax_u32_2D_default.cairo deleted file mode 100644 index 9c9a285bd..000000000 --- a/tests/nodes/argmax_u32_2D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_2D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_2D_default/input_0.cairo b/tests/nodes/argmax_u32_2D_default/input_0.cairo deleted file mode 100644 index e6678379b..000000000 --- a/tests/nodes/argmax_u32_2D_default/input_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(51); - data.append(202); - data.append(164); - data.append(83); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_2D_default/output_0.cairo 
b/tests/nodes/argmax_u32_2D_default/output_0.cairo deleted file mode 100644 index 221e8a246..000000000 --- a/tests/nodes/argmax_u32_2D_default/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_2D_keepdims_false.cairo b/tests/nodes/argmax_u32_2D_keepdims_false.cairo deleted file mode 100644 index 8bfa99454..000000000 --- a/tests/nodes/argmax_u32_2D_keepdims_false.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_2D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_2D_keepdims_false/input_0.cairo b/tests/nodes/argmax_u32_2D_keepdims_false/input_0.cairo deleted file mode 100644 index 19a457838..000000000 --- a/tests/nodes/argmax_u32_2D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(207); - data.append(36); - data.append(209); - data.append(4); - 
TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_2D_keepdims_false/output_0.cairo b/tests/nodes/argmax_u32_2D_keepdims_false/output_0.cairo deleted file mode 100644 index edc68637b..000000000 --- a/tests/nodes/argmax_u32_2D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,13 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_2D_last_index.cairo b/tests/nodes/argmax_u32_2D_last_index.cairo deleted file mode 100644 index 60b70f828..000000000 --- a/tests/nodes/argmax_u32_2D_last_index.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_2D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_2D_last_index/input_0.cairo b/tests/nodes/argmax_u32_2D_last_index/input_0.cairo deleted file mode 100644 index 0f577ecf4..000000000 --- a/tests/nodes/argmax_u32_2D_last_index/input_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - 
data.append(41); - data.append(93); - data.append(233); - data.append(71); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_2D_last_index/output_0.cairo b/tests/nodes/argmax_u32_2D_last_index/output_0.cairo deleted file mode 100644 index 221e8a246..000000000 --- a/tests/nodes/argmax_u32_2D_last_index/output_0.cairo +++ /dev/null @@ -1,14 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_3D_default.cairo b/tests/nodes/argmax_u32_3D_default.cairo deleted file mode 100644 index f9f47d2fd..000000000 --- a/tests/nodes/argmax_u32_3D_default.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_3D_default() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::None(()), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_3D_default/input_0.cairo b/tests/nodes/argmax_u32_3D_default/input_0.cairo deleted file mode 100644 index 83343807d..000000000 --- a/tests/nodes/argmax_u32_3D_default/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - 
shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(22); - data.append(254); - data.append(48); - data.append(151); - data.append(21); - data.append(13); - data.append(254); - data.append(100); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_3D_default/output_0.cairo b/tests/nodes/argmax_u32_3D_default/output_0.cairo deleted file mode 100644 index e3da7dd7d..000000000 --- a/tests/nodes/argmax_u32_3D_default/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(1); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_3D_keepdims_false.cairo b/tests/nodes/argmax_u32_3D_keepdims_false.cairo deleted file mode 100644 index 7ee7d2aad..000000000 --- a/tests/nodes/argmax_u32_3D_keepdims_false.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_3D_keepdims_false() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, Option::Some(false), Option::None(())); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_3D_keepdims_false/input_0.cairo b/tests/nodes/argmax_u32_3D_keepdims_false/input_0.cairo deleted file mode 100644 index 4842ff00c..000000000 --- 
a/tests/nodes/argmax_u32_3D_keepdims_false/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(183); - data.append(138); - data.append(72); - data.append(104); - data.append(239); - data.append(185); - data.append(16); - data.append(188); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_3D_keepdims_false/output_0.cairo b/tests/nodes/argmax_u32_3D_keepdims_false/output_0.cairo deleted file mode 100644 index 15d30cb7a..000000000 --- a/tests/nodes/argmax_u32_3D_keepdims_false/output_0.cairo +++ /dev/null @@ -1,16 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); - data.append(0); - data.append(1); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_3D_last_index.cairo b/tests/nodes/argmax_u32_3D_last_index.cairo deleted file mode 100644 index 8098105e1..000000000 --- a/tests/nodes/argmax_u32_3D_last_index.cairo +++ /dev/null @@ -1,20 +0,0 @@ -mod input_0; -mod output_0; - - -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; -use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; - -#[test] -#[available_gas(2000000000)] -fn test_argmax_u32_3D_last_index() { - let input_0 = input_0::input_0(); - let z = output_0::output_0(); - - let y = input_0.argmax(0, 
Option::None(()), Option::Some(true)); - - assert_eq(y, z); -} diff --git a/tests/nodes/argmax_u32_3D_last_index/input_0.cairo b/tests/nodes/argmax_u32_3D_last_index/input_0.cairo deleted file mode 100644 index 2a464efd3..000000000 --- a/tests/nodes/argmax_u32_3D_last_index/input_0.cairo +++ /dev/null @@ -1,21 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn input_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(2); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(9); - data.append(169); - data.append(140); - data.append(99); - data.append(130); - data.append(132); - data.append(79); - data.append(57); - TensorTrait::new(shape.span(), data.span()) -} diff --git a/tests/nodes/argmax_u32_3D_last_index/output_0.cairo b/tests/nodes/argmax_u32_3D_last_index/output_0.cairo deleted file mode 100644 index 217f9ee86..000000000 --- a/tests/nodes/argmax_u32_3D_last_index/output_0.cairo +++ /dev/null @@ -1,17 +0,0 @@ -use core::array::{ArrayTrait, SpanTrait}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorAdd}; - -fn output_0() -> Tensor { - let mut shape = ArrayTrait::::new(); - shape.append(1); - shape.append(2); - shape.append(2); - - let mut data = ArrayTrait::new(); - data.append(1); - data.append(0); - data.append(0); - data.append(0); - TensorTrait::new(shape.span(), data.span()) -} From 97932d65f8457f94b2b45c4176c30625ccebaec0 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 09:32:11 +0100 Subject: [PATCH 55/68] update doc --- docs/framework/operators/tensor/tensor.argmax.md | 4 ++-- src/operators/tensor/core.cairo | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.argmax.md b/docs/framework/operators/tensor/tensor.argmax.md index cb3457a45..43b1e3721 100644 
--- a/docs/framework/operators/tensor/tensor.argmax.md +++ b/docs/framework/operators/tensor/tensor.argmax.md @@ -1,7 +1,7 @@ # tensor.argmax ```rust - fn argmax(self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option) -> Tensor; + fn argmax(self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option) -> Tensor; ``` Returns the index of the maximum value along the specified axis. @@ -9,7 +9,7 @@ Returns the index of the maximum value along the specified axis. ## Args * `self`(`@Tensor`) - The input tensor. -* `axis`(`usize`) - The axis along which to compute the argmax. +* `axis`(`i32`) - The axis along which to compute the argmax. * `keepdims`(`Option`) - If true, retains reduced dimensions with length 1. Defaults to true. * `select_last_index`(`Option`) - If true, the index of the last occurrence of the maximum value is returned. Defaults to false. diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 25ddc8dd3..adb99299e 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -687,7 +687,7 @@ trait TensorTrait { /// # tensor.argmax /// /// ```rust - /// fn argmax(self: @Tensor, axis: usize, keepdims: Option, select_last_index: Option) -> Tensor; + /// fn argmax(self: @Tensor, axis: i32, keepdims: Option, select_last_index: Option) -> Tensor; /// ``` /// /// Returns the index of the maximum value along the specified axis. @@ -695,7 +695,7 @@ trait TensorTrait { /// ## Args /// /// * `self`(`@Tensor`) - The input tensor. - /// * `axis`(`usize`) - The axis along which to compute the argmax. + /// * `axis`(`i32`) - The axis along which to compute the argmax. /// * `keepdims`(`Option`) - If true, retains reduced dimensions with length 1. Defaults to true. /// * `select_last_index`(`Option`) - If true, the index of the last occurrence of the maximum value is returned. Defaults to false. 
/// From bb40472935b350d68d45e18c47222351e321557e Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 09:47:03 +0100 Subject: [PATCH 56/68] refactor operator --- src/operators/tensor/core.cairo | 2 +- .../tensor/implementations/tensor_bool.cairo | 2 +- .../implementations/tensor_complex64.cairo | 2 +- .../implementations/tensor_fp16x16.cairo | 2 +- .../implementations/tensor_fp16x16wide.cairo | 2 +- .../implementations/tensor_fp32x32.cairo | 2 +- .../implementations/tensor_fp64x64.cairo | 2 +- .../implementations/tensor_fp8x23.cairo | 2 +- .../implementations/tensor_fp8x23wide.cairo | 2 +- .../tensor/implementations/tensor_i32.cairo | 2 +- .../tensor/implementations/tensor_i8.cairo | 2 +- .../tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/less.cairo | 10 +- tests/lib.cairo | 2 +- tests/nodes.cairo | 1884 ++++++++--------- 15 files changed, 960 insertions(+), 960 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index adb99299e..02ed17ae9 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1317,7 +1317,7 @@ trait TensorTrait { /// >>> [false,false,false,false,false,false,false,true,true] /// ``` /// - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.less_equal /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index d08a0a2c1..7e2421a7d 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -117,7 +117,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo 
b/src/operators/tensor/implementations/tensor_complex64.cairo index 2bbbab8fe..5e4650df7 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -134,7 +134,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index e1a780e17..d5045d327 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -128,7 +128,7 @@ impl FP16x16Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 13ea883cf..42bf6e195 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -138,7 +138,7 @@ impl FP16x16WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index cb745f113..260e33acc 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -125,7 +125,7 @@ impl FP32x32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> 
Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 0ee0f41d2..9427623fb 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -125,7 +125,7 @@ impl FP64x64Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index c404e4641..d614fcb89 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -125,7 +125,7 @@ impl FP8x23Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index f415266c4..50000dcb8 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -128,7 +128,7 @@ impl FP8x23WTensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 3c3641310..483845743 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -126,7 
+126,7 @@ impl I32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index a872a4665..78d55c8d0 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -123,7 +123,7 @@ impl I8Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index e85316a96..19292ae15 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -122,7 +122,7 @@ impl U32Tensor of TensorTrait { math::greater_equal::greater_equal(self, other) } - fn less(self: @Tensor, other: @Tensor) -> Tensor { + fn less(self: @Tensor, other: @Tensor) -> Tensor { math::less::less(self, other) } diff --git a/src/operators/tensor/math/less.cairo b/src/operators/tensor/math/less.cairo index 240c92c57..d23fb4301 100644 --- a/src/operators/tensor/math/less.cairo +++ b/src/operators/tensor/math/less.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, BoolTensor}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,9 +6,9 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less docstring fn less, impl TCopy: Copy, impl TDrop: Drop>( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = 
broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); @@ -20,9 +20,9 @@ fn less, impl TCopy: Copy, impl TDrop: Dro let indices_other = broadcast_index_mapping(*z.shape, indices_broadcasted); if *(*y.data)[indices_self] < *(*z.data)[indices_other] { - result.append(true); + result.append(1); } else { - result.append(false); + result.append(0); } n += 1; diff --git a/tests/lib.cairo b/tests/lib.cairo index a61287d92..eb58139db 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ // mod numbers; // mod performance; // mod tensor_core; -mod nodes; +// mod nodes; // mod ml; // mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index a8b5f3db6..244d8b0c9 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,945 +1,945 @@ -// mod abs_fp16x16; -// mod abs_fp8x23; -// mod abs_i32; -// mod abs_i8; -// mod acos_fp16x16; -// mod acos_fp8x23; -// mod acosh_fp16x16; -// mod acosh_fp8x23; -// mod add_fp16x16; -// mod add_fp16x16_broadcast; -// mod add_fp8x23; -// mod add_fp8x23_broadcast; -// mod add_i32; -// mod add_i32_broadcast; -// mod add_i8; -// mod add_i8_broadcast; -// mod add_u32; -// mod add_u32_broadcast; -// mod argmin_fp16x16_1D_default; -// mod argmin_fp16x16_1D_keepdims_false; -// mod argmin_fp16x16_1D_last_index; -// mod argmin_fp16x16_2D_default; -// mod argmin_fp16x16_2D_keepdims_false; -// mod argmin_fp16x16_2D_last_index; -// mod argmin_fp16x16_3D_default; -// mod argmin_fp16x16_3D_keepdims_false; -// mod argmin_fp16x16_3D_last_index; -// mod argmin_fp8x23_1D_default; -// mod argmin_fp8x23_1D_keepdims_false; -// mod argmin_fp8x23_1D_last_index; -// mod argmin_fp8x23_2D_default; -// mod argmin_fp8x23_2D_keepdims_false; -// mod argmin_fp8x23_2D_last_index; -// mod argmin_fp8x23_3D_default; -// mod argmin_fp8x23_3D_keepdims_false; -// mod argmin_fp8x23_3D_last_index; -// mod argmin_i32_1D_default; -// mod 
argmin_i32_1D_keepdims_false; -// mod argmin_i32_1D_last_index; -// mod argmin_i32_2D_default; -// mod argmin_i32_2D_keepdims_false; -// mod argmin_i32_2D_last_index; -// mod argmin_i32_3D_default; -// mod argmin_i32_3D_keepdims_false; -// mod argmin_i32_3D_last_index; -// mod argmin_i8_1D_default; -// mod argmin_i8_1D_keepdims_false; -// mod argmin_i8_1D_last_index; -// mod argmin_i8_2D_default; -// mod argmin_i8_2D_keepdims_false; -// mod argmin_i8_2D_last_index; -// mod argmin_i8_3D_default; -// mod argmin_i8_3D_keepdims_false; -// mod argmin_i8_3D_last_index; -// mod argmin_u32_1D_default; -// mod argmin_u32_1D_keepdims_false; -// mod argmin_u32_1D_last_index; -// mod argmin_u32_2D_default; -// mod argmin_u32_2D_keepdims_false; -// mod argmin_u32_2D_last_index; -// mod argmin_u32_3D_default; -// mod argmin_u32_3D_keepdims_false; -// mod argmin_u32_3D_last_index; -// mod asin_fp16x16; -// mod asin_fp8x23; -// mod asinh_fp16x16; -// mod asinh_fp8x23; -// mod atan_fp16x16; -// mod atan_fp8x23; -// mod ceil_fp16x16; -// mod ceil_fp8x23; -// mod concat_fp16x16_1d; -// mod concat_fp16x16_2d; -// mod concat_fp16x16_3d_default; -// mod concat_fp16x16_3d_axis_1; -// mod concat_fp16x16_3d_axis_2; -// mod concat_fp16x16_3d_three_tensors_axis_1; -// mod concat_fp16x16_3d_three_tensors_axis_2; -// mod concat_fp8x23_1d; -// mod concat_fp8x23_2d; -// mod concat_fp8x23_3d_default; -// mod concat_fp8x23_3d_axis_1; -// mod concat_fp8x23_3d_axis_2; -// mod concat_fp8x23_3d_three_tensors_axis_1; -// mod concat_fp8x23_3d_three_tensors_axis_2; -// mod concat_i32_1d; -// mod concat_i32_2d; -// mod concat_i32_3d_default; -// mod concat_i32_3d_axis_1; -// mod concat_i32_3d_axis_2; -// mod concat_i32_3d_three_tensors_axis_1; -// mod concat_i32_3d_three_tensors_axis_2; -// mod concat_i8_1d; -// mod concat_i8_2d; -// mod concat_i8_3d_default; -// mod concat_i8_3d_axis_1; -// mod concat_i8_3d_axis_2; -// mod concat_i8_3d_three_tensors_axis_1; -// mod concat_i8_3d_three_tensors_axis_2; -// 
mod concat_u32_1d; -// mod concat_u32_2d; -// mod concat_u32_3d_default; -// mod concat_u32_3d_axis_1; -// mod concat_u32_3d_axis_2; -// mod concat_u32_3d_three_tensors_axis_1; -// mod concat_u32_3d_three_tensors_axis_2; -// mod cos_fp16x16; -// mod cos_fp8x23; -// mod cosh_fp16x16; -// mod cosh_fp8x23; -// mod cumsum_fp16x16_1d_default; -// mod cumsum_fp16x16_1d_exclusive; -// mod cumsum_fp16x16_1d_reverse; -// mod cumsum_fp16x16_1d_reverse_exclusive; -// mod cumsum_fp16x16_2d_axis_0; -// mod cumsum_fp16x16_2d_axis_1; -// mod cumsum_fp8x23_1d_default; -// mod cumsum_fp8x23_1d_exclusive; -// mod cumsum_fp8x23_1d_reverse; -// mod cumsum_fp8x23_1d_reverse_exclusive; -// mod cumsum_fp8x23_2d_axis_0; -// mod cumsum_fp8x23_2d_axis_1; -// mod cumsum_i32_1d_default; -// mod cumsum_i32_1d_exclusive; -// mod cumsum_i32_1d_reverse; -// mod cumsum_i32_1d_reverse_exclusive; -// mod cumsum_i32_2d_axis_0; -// mod cumsum_i32_2d_axis_1; -// mod cumsum_i8_1d_default; -// mod cumsum_i8_1d_exclusive; -// mod cumsum_i8_1d_reverse; -// mod cumsum_i8_1d_reverse_exclusive; -// mod cumsum_i8_2d_axis_0; -// mod cumsum_i8_2d_axis_1; -// mod cumsum_u32_1d_default; -// mod cumsum_u32_1d_exclusive; -// mod cumsum_u32_1d_reverse; -// mod cumsum_u32_1d_reverse_exclusive; -// mod cumsum_u32_2d_axis_0; -// mod cumsum_u32_2d_axis_1; -// mod div_fp16x16; -// mod div_fp16x16_broadcast; -// mod div_fp8x23; -// mod div_fp8x23_broadcast; -// mod div_i32; -// mod div_i32_broadcast; -// mod div_i8; -// mod div_i8_broadcast; -// mod div_u32; -// mod div_u32_broadcast; -// mod equal_fp16x16; -// mod equal_fp16x16_broadcast; -// mod equal_fp8x23; -// mod equal_fp8x23_broadcast; -// mod equal_i32; -// mod equal_i32_broadcast; -// mod equal_i8; -// mod equal_i8_broadcast; -// mod equal_u32; -// mod equal_u32_broadcast; -// mod exp_fp16x16; -// mod exp_fp8x23; -// mod less_equal_fp16x16; -// mod less_equal_fp16x16_broadcast; -// mod less_equal_fp8x23; -// mod less_equal_fp8x23_broadcast; -// mod less_equal_i32; 
-// mod less_equal_i32_broadcast; -// mod less_equal_i8; -// mod less_equal_i8_broadcast; -// mod less_equal_u32; -// mod less_equal_u32_broadcast; -// mod greater_fp16x16; -// mod greater_fp16x16_broadcast; -// mod greater_fp8x23; -// mod greater_fp8x23_broadcast; -// mod greater_i32; -// mod greater_i32_broadcast; -// mod greater_i8; -// mod greater_i8_broadcast; -// mod greater_u32; -// mod greater_u32_broadcast; -// mod leaky_relu_fp16x16; -// mod leaky_relu_fp8x23; -// mod linear_fp16x16; -// mod linear_fp8x23; -// mod linear_i32; -// mod linear_i8; -// mod linear_u32; -// mod log_fp16x16; -// mod log_fp8x23; -// mod logsoftmax_fp16x16_axis_0; -// mod logsoftmax_fp16x16_axis_1; -// mod logsoftmax_fp8x23_axis_0; -// mod logsoftmax_fp8x23_axis_1; -// mod matmul_fp16x16_1d; -// mod matmul_fp16x16_2x2; -// mod matmul_fp16x16_2x1; -// mod matmul_fp16x16_1x2; -// mod matmul_fp8x23_1d; -// mod matmul_fp8x23_2x2; -// mod matmul_fp8x23_2x1; -// mod matmul_fp8x23_1x2; -// mod matmul_i32_1d; -// mod matmul_i32_2x2; -// mod matmul_i32_2x1; -// mod matmul_i32_1x2; -// mod matmul_i8_1d; -// mod matmul_i8_2x2; -// mod matmul_i8_2x1; -// mod matmul_i8_1x2; -// mod matmul_u32_1d; -// mod matmul_u32_2x2; -// mod matmul_u32_2x1; -// mod matmul_u32_1x2; -// mod mul_fp16x16; -// mod mul_fp16x16_broadcast; -// mod mul_fp8x23; -// mod mul_fp8x23_broadcast; -// mod mul_i32; -// mod mul_i32_broadcast; -// mod mul_i8; -// mod mul_i8_broadcast; -// mod mul_u32; -// mod mul_u32_broadcast; -// mod or_fp16x16; -// mod or_fp16x16_broadcast; -// mod or_fp8x23; -// mod or_fp8x23_broadcast; -// mod or_i32; -// mod or_i32_broadcast; -// mod or_i8; -// mod or_i8_broadcast; -// mod or_u32; -// mod or_u32_broadcast; -// mod relu_fp16x16; -// mod relu_fp8x23; -// mod relu_i32; -// mod relu_i8; -// mod sigmoid_fp16x16; -// mod sigmoid_fp8x23; -// mod sin_fp16x16; -// mod sin_fp8x23; -// mod sinh_fp16x16; -// mod sinh_fp8x23; -// mod softplus_fp8x23; -// mod softplus_fp16x16; -// mod softsign_fp8x23; 
-// mod softsign_fp16x16; -// mod sqrt_fp16x16; -// mod sqrt_fp8x23; -// mod sub_fp16x16; -// mod sub_fp16x16_broadcast; -// mod sub_fp8x23; -// mod sub_fp8x23_broadcast; -// mod sub_i32; -// mod sub_i32_broadcast; -// mod sub_i8; -// mod sub_i8_broadcast; -// mod sub_u32; -// mod sub_u32_broadcast; -// mod tanh_fp16x16; -// mod tanh_fp8x23; -// mod transpose_fp16x16_2d; -// mod transpose_fp16x16_3d; -// mod transpose_fp8x23_2d; -// mod transpose_fp8x23_3d; -// mod transpose_i32_2d; -// mod transpose_i32_3d; -// mod transpose_i8_2d; -// mod transpose_i8_3d; -// mod transpose_u32_2d; -// mod transpose_u32_3d; -// mod xor_fp16x16; -// mod xor_fp16x16_broadcast; -// mod xor_fp8x23; -// mod xor_fp8x23_broadcast; -// mod xor_i32; -// mod xor_i32_broadcast; -// mod xor_i8; -// mod xor_i8_broadcast; -// mod xor_u32; -// mod xor_u32_broadcast; -// mod greater_equal_fp16x16; -// mod greater_equal_fp16x16_broadcast; -// mod greater_equal_fp8x23; -// mod greater_equal_fp8x23_broadcast; -// mod greater_equal_i32; -// mod greater_equal_i32_broadcast; -// mod greater_equal_i8; -// mod greater_equal_i8_broadcast; -// mod greater_equal_u32; -// mod greater_equal_u32_broadcast; -// mod slice_fp16x16_2d; -// mod slice_fp16x16_3d; -// mod slice_fp8x23_2d; -// mod slice_fp8x23_3d; -// mod slice_i32_2d; -// mod slice_i32_3d; -// mod slice_i8_2d; -// mod slice_i8_3d; -// mod slice_u32_2d; -// mod slice_u32_3d; -// mod nonzero_fp16x16_2d; -// mod nonzero_fp16x16_3d; -// mod nonzero_fp8x23_2d; -// mod nonzero_fp8x23_3d; -// mod nonzero_i32_2d; -// mod nonzero_i32_3d; -// mod nonzero_i8_2d; -// mod nonzero_i8_3d; -// mod nonzero_u32_2d; -// mod nonzero_u32_3d; -// mod squeeze_fP16x16; -// mod squeeze_fP8x23; -// mod squeeze_i32; -// mod squeeze_i8; -// mod squeeze_u32; -// mod unsqueeze_fp16x16_2d; -// mod unsqueeze_fp16x16_3d; -// mod unsqueeze_fp8x23_2d; -// mod unsqueeze_fp8x23_3d; -// mod unsqueeze_i32_2d; -// mod unsqueeze_i32_3d; -// mod unsqueeze_i8_2d; -// mod unsqueeze_i8_3d; -// 
mod unsqueeze_u32_2d; -// mod unsqueeze_u32_3d; -// mod sign_fP16x16; -// mod sign_fP8x23; -// mod sign_fail; -// mod sign_i32; -// mod sign_i8; -// mod clip_fp16x16_2d; -// mod clip_fp16x16_3d; -// mod clip_fp8x23_2d; -// mod clip_fp8x23_3d; -// mod clip_i32_2d; -// mod clip_i32_3d; -// mod clip_i8_2d; -// mod clip_i8_3d; -// mod clip_u32_2d; -// mod clip_u32_3d; -// mod identity_fP16x16; -// mod identity_fP8x23; -// mod identity_i32; -// mod identity_i8; -// mod identity_u32; -// mod thresholded_relu_fp16x16; -// mod thresholded_relu_fp8x23; -// mod hard_sigmoid_fp8x23; -// mod hard_sigmoid_fp16x16; -// mod neg_fp16x16; -// mod neg_fp8x23; -// mod neg_i32; -// mod neg_i8; -// mod gemm_all_attributes; -// mod gemm_alpha; -// mod gemm_beta; -// mod gemm_default_matrix_bias; -// mod gemm_default_vector_bias; -// mod gemm_default_no_bias; -// mod gemm_transposeA; -// mod gemm_transposeB; -// mod min_fp16x16_three_tensors; -// mod min_fp16x16_broadcast_three_tensors; -// mod min_fp16x16_two_tensors; -// mod min_fp16x16_broadcast_two_tensors; -// mod min_fp8x23_three_tensors; -// mod min_fp8x23_broadcast_three_tensors; -// mod min_fp8x23_two_tensors; -// mod min_fp8x23_broadcast_two_tensors; -// mod min_i32_three_tensors; -// mod min_i32_broadcast_three_tensors; -// mod min_i32_two_tensors; -// mod min_i32_broadcast_two_tensors; -// mod min_i8_three_tensors; -// mod min_i8_broadcast_three_tensors; -// mod min_i8_two_tensors; -// mod min_i8_broadcast_two_tensors; -// mod min_u32_three_tensors; -// mod min_u32_broadcast_three_tensors; -// mod min_u32_two_tensors; -// mod min_u32_broadcast_two_tensors; -// mod where_fp16x16; -// mod where_fp16x16_broadcast; -// mod where_fp8x23; -// mod where_fp8x23_broadcast; -// mod where_i32; -// mod where_i32_broadcast; -// mod where_i8; -// mod where_i8_broadcast; -// mod where_u32; -// mod where_u32_broadcast; -// mod not_bool; -// mod round_fp16x16; -// mod round_fp8x23; -// mod max_fp16x16_three_tensors; -// mod 
max_fp16x16_broadcast_three_tensors; -// mod max_fp16x16_two_tensors; -// mod max_fp16x16_broadcast_two_tensors; -// mod max_fp8x23_three_tensors; -// mod max_fp8x23_broadcast_three_tensors; -// mod max_fp8x23_two_tensors; -// mod max_fp8x23_broadcast_two_tensors; -// mod max_i32_three_tensors; -// mod max_i32_broadcast_three_tensors; -// mod max_i32_two_tensors; -// mod max_i32_broadcast_two_tensors; -// mod max_i8_three_tensors; -// mod max_i8_broadcast_three_tensors; -// mod max_i8_two_tensors; -// mod max_i8_broadcast_two_tensors; -// mod max_u32_three_tensors; -// mod max_u32_broadcast_three_tensors; -// mod max_u32_two_tensors; -// mod max_u32_broadcast_two_tensors; -// mod scatter_fp16x16_3d_default; -// mod scatter_fp16x16_3d_axis1; -// mod scatter_fp16x16_3d_axis1_add; -// mod scatter_fp8x23_default; -// mod scatter_fp8x23_axis1; -// mod scatter_fp8x23_mul; -// mod scatter_i8_default; -// mod scatter_i8_axis1; -// mod scatter_i8_axis1_max; -// mod scatter_u32_default; -// mod scatter_u32_axis1; -// mod scatter_u32_add; -// mod array_feature_extractor_1D_i32; -// mod array_feature_extractor_1D_fp8x23; -// mod array_feature_extractor_1D_fp16x16; -// mod array_feature_extractor_2D_i32; -// mod array_feature_extractor_2D_fp8x23; -// mod array_feature_extractor_2D_fp16x16; -// mod array_feature_extractor_3D_i32; -// mod array_feature_extractor_3D_fp8x23; -// mod array_feature_extractor_3D_fp16x16; -// mod binarizer_fp16x16; -// mod binarizer_fp8x23; -// mod tril_fp16x16; -// mod tril_fp16x16_neg; -// mod tril_fp16x16_one_row; -// mod tril_fp16x16_out_neg; -// mod tril_fp16x16_out_pos; -// mod tril_fp16x16_pos; -// mod tril_fp16x16_square; -// mod tril_fp16x16_square_neg; -// mod tril_fp16x16_zero; -// mod triu_fp16x16; -// mod triu_fp16x16_neg; -// mod triu_fp16x16_one_row; -// mod triu_fp16x16_out_neg; -// mod triu_fp16x16_out_pos; -// mod triu_fp16x16_pos; -// mod triu_fp16x16_square; -// mod triu_fp16x16_square_neg; -// mod triu_fp16x16_zero; -// mod 
tril_fp8x23; -// mod tril_fp8x23_neg; -// mod tril_fp8x23_one_row; -// mod tril_fp8x23_out_neg; -// mod tril_fp8x23_out_pos; -// mod tril_fp8x23_pos; -// mod tril_fp8x23_square; -// mod tril_fp8x23_square_neg; -// mod tril_fp8x23_zero; -// mod triu_fp8x23; -// mod triu_fp8x23_neg; -// mod triu_fp8x23_one_row; -// mod triu_fp8x23_out_neg; -// mod triu_fp8x23_out_pos; -// mod triu_fp8x23_pos; -// mod triu_fp8x23_square; -// mod triu_fp8x23_square_neg; -// mod triu_fp8x23_zero; -// mod tril_i32; -// mod tril_neg_i32; -// mod tril_i32_one_row; -// mod tril_i32_out_neg; -// mod tril_i32_out_pos; -// mod tril_i32_pos; -// mod tril_i32_square; -// mod tril_i32_square_neg; -// mod tril_i32_zero; -// mod triu_i32; -// mod triu_i32_neg; -// mod triu_i32_one_row; -// mod triu_i32_out_neg; -// mod triu_i32_out_pos; -// mod triu_i32_pos; -// mod triu_i32_square; -// mod triu_i32_square_neg; -// mod triu_i32_zero; -// mod tril_i8; -// mod tril_i8_neg; -// mod tril_i8_one_row; -// mod tril_i8_out_neg; -// mod tril_i8_out_pos; -// mod tril_i8_pos; -// mod tril_i8_square; -// mod tril_i8_square_neg; -// mod tril_i8_zero; -// mod triu_i8; -// mod triu_i8_neg; -// mod triu_i8_one_row; -// mod triu_i8_out_neg; -// mod triu_i8_out_pos; -// mod triu_i8_pos; -// mod triu_i8_square; -// mod triu_i8_square_neg; -// mod triu_i8_zero; -// mod tril_u32; -// mod tril_u32_neg; -// mod tril_u32_one_row; -// mod tril_u32_out_neg; -// mod tril_u32_out_pos; -// mod tril_u32_pos; -// mod tril_u32_square; -// mod tril_u32_square_neg; -// mod tril_u32_zero; -// mod triu_u32; -// mod triu_u32_neg; -// mod triu_u32_one_row; -// mod triu_u32_out_neg; -// mod triu_u32_out_pos; -// mod triu_u32_pos; -// mod triu_u32_square; -// mod triu_u32_square_neg; -// mod triu_u32_zero; -// mod reduce_sum_square_fp16x16_export_do_not_keepdims; -// mod reduce_sum_square_fp16x16_export_keepdims; -// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -// mod reduce_sum_square_fp8x23_export_do_not_keepdims; -// 
mod reduce_sum_square_fp8x23_export_keepdims; -// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -// mod reduce_sum_square_i32_export_do_not_keepdims; -// mod reduce_sum_square_i32_export_keepdims; -// mod reduce_sum_square_i32_export_negative_axes_keepdims; -// mod reduce_sum_square_i8_export_do_not_keepdims; -// mod reduce_sum_square_i8_export_keepdims; -// mod reduce_sum_square_i8_export_negative_axes_keepdims; -// mod reduce_sum_square_u32_export_do_not_keepdims; -// mod reduce_sum_square_u32_export_keepdims; -// mod reduce_sum_square_u32_export_negative_axes_keepdims; -// mod reduce_l2_fp16x16_export_do_not_keepdims; -// mod reduce_l2_fp16x16_export_keepdims; -// mod reduce_l2_fp16x16_export_negative_axes_keepdims; -// mod reduce_l2_fp8x23_export_do_not_keepdims; -// mod reduce_l2_fp8x23_export_keepdims; -// mod reduce_l2_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_fp16x16_export_do_not_keepdims; -// mod reduce_l1_fp16x16_export_keepdims; -// mod reduce_l1_fp16x16_export_negative_axes_keepdims; -// mod reduce_l1_fp8x23_export_do_not_keepdims; -// mod reduce_l1_fp8x23_export_keepdims; -// mod reduce_l1_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_i32_export_do_not_keepdims; -// mod reduce_l1_i32_export_keepdims; -// mod reduce_l1_i32_export_negative_axes_keepdims; -// mod reduce_l1_i8_export_do_not_keepdims; -// mod reduce_l1_i8_export_keepdims; -// mod reduce_l1_i8_export_negative_axes_keepdims; -// mod reduce_l1_u32_export_do_not_keepdims; -// mod reduce_l1_u32_export_keepdims; -// mod reduce_l1_u32_export_negative_axes_keepdims; -// mod reduce_prod_fp16x16_1D; -// mod reduce_prod_fp16x16_2D_default; -// mod reduce_prod_fp16x16_2D_keepdims; -// mod reduce_prod_fp16x16_2D_axis_1; -// mod reduce_prod_fp8x23_1D; -// mod reduce_prod_fp8x23_2D_default; -// mod reduce_prod_fp8x23_2D_keepdims; -// mod reduce_prod_fp8x23_2D_axis_1; -// mod reduce_prod_i32_1D; -// mod reduce_prod_i32_2D_default; -// mod reduce_prod_i32_2D_keepdims; 
-// mod reduce_prod_i32_2D_axis_1; -// mod reduce_prod_i8_1D; -// mod reduce_prod_i8_2D_default; -// mod reduce_prod_i8_2D_keepdims; -// mod reduce_prod_i8_2D_axis_1; -// mod reduce_prod_u32_1D; -// mod reduce_prod_u32_2D_default; -// mod reduce_prod_u32_2D_keepdims; -// mod reduce_prod_u32_2D_axis_1; -// mod sequence_length_fp16x16; -// mod sequence_length_fp16x16_broadcast; -// mod sequence_length_fp8x23; -// mod sequence_length_fp8x23_broadcast; -// mod sequence_length_i32; -// mod sequence_length_i32_broadcast; -// mod sequence_length_i8; -// mod sequence_length_i8_broadcast; -// mod sequence_length_u32; -// mod sequence_length_u32_broadcast; -// mod sequence_at_u32_positive; -// mod sequence_at_u32_negative; -// mod sequence_at_fp16x16_positive; -// mod sequence_at_fp16x16_negative; -// mod sequence_at_fp8x23_positive; -// mod sequence_at_fp8x23_negative; -// mod sequence_at_i32_positive; -// mod sequence_at_i32_negative; -// mod sequence_at_i8_positive; -// mod sequence_at_i8_negative; -// mod reduce_min_fp16x16_1D; -// mod reduce_min_fp16x16_2D_default; -// mod reduce_min_fp16x16_2D_keepdims; -// mod reduce_min_fp16x16_2D_axis_1; -// mod reduce_min_fp8x23_1D; -// mod reduce_min_fp8x23_2D_default; -// mod reduce_min_fp8x23_2D_keepdims; -// mod reduce_min_fp8x23_2D_axis_1; -// mod reduce_min_i32_1D; -// mod reduce_min_i32_2D_default; -// mod reduce_min_i32_2D_keepdims; -// mod reduce_min_i32_2D_axis_1; -// mod reduce_min_i8_1D; -// mod reduce_min_i8_2D_default; -// mod reduce_min_i8_2D_keepdims; -// mod reduce_min_i8_2D_axis_1; -// mod reduce_min_u32_1D; -// mod reduce_min_u32_2D_default; -// mod reduce_min_u32_2D_keepdims; -// mod reduce_min_u32_2D_axis_1; -// mod sequence_construct_fp16x16; -// mod sequence_construct_fp8x23; -// mod sequence_construct_i32; -// mod sequence_construct_i8; -// mod sequence_construct_u32; -// mod shrink_hard_fp16x16; -// mod shrink_soft_fp16x16; -// mod shrink_hard_fp8x23; -// mod shrink_soft_fp8x23; -// mod 
sequence_empty_fp16x16; -// mod sequence_empty_fp8x23; -// mod sequence_empty_i32; -// mod sequence_empty_i8; -// mod sequence_empty_u32; -// mod reduce_mean_fp16x16_1D; -// mod reduce_mean_fp16x16_2D_default; -// mod reduce_mean_fp16x16_2D_keepdims; -// mod reduce_mean_fp16x16_2D_axis_1; -// mod reduce_mean_fp8x23_1D; -// mod reduce_mean_fp8x23_2D_default; -// mod reduce_mean_fp8x23_2D_keepdims; -// mod reduce_mean_fp8x23_2D_axis_1; -// mod reduce_mean_i32_1D; -// mod reduce_mean_i32_2D_default; -// mod reduce_mean_i32_2D_keepdims; -// mod reduce_mean_i32_2D_axis_1; -// mod reduce_mean_i8_1D; -// mod reduce_mean_i8_2D_default; -// mod reduce_mean_i8_2D_keepdims; -// mod reduce_mean_i8_2D_axis_1; -// mod reduce_mean_u32_1D; -// mod reduce_mean_u32_2D_default; -// mod reduce_mean_u32_2D_keepdims; -// mod reduce_mean_u32_2D_axis_1; -// mod pow_fp16x16; -// mod pow_fp16x16_broadcast; -// mod pow_fp8x23; -// mod pow_fp8x23_broadcast; -// mod sequence_erase_u32_positive; -// mod sequence_erase_u32_negative; -// mod sequence_erase_u32_empty; -// mod sequence_erase_fp16x16_positive; -// mod sequence_erase_fp16x16_negative; -// mod sequence_erase_fp16x16_empty; -// mod sequence_erase_fp8x23_positive; -// mod sequence_erase_fp8x23_negative; -// mod sequence_erase_fp8x23_empty; -// mod sequence_erase_i32_positive; -// mod sequence_erase_i32_negative; -// mod sequence_erase_i32_empty; -// mod sequence_erase_i8_positive; -// mod sequence_erase_i8_negative; -// mod sequence_erase_i8_empty; -// mod sequence_insert_fp16x16; -// mod sequence_insert_fp8x23; -// mod sequence_insert_i32; -// mod sequence_insert_i8; -// mod sequence_insert_u32; -// mod concat_from_sequence_fp8x23_new_axis_zero; -// mod concat_from_sequence_fp8x23_new_axis_one; -// mod concat_from_sequence_fp8x23_new_axis_default; -// mod concat_from_sequence_fp16x16_new_axis_zero; -// mod concat_from_sequence_fp16x16_new_axis_one; -// mod concat_from_sequence_fp16x16_new_axis_default; -// mod 
concat_from_sequence_i32_new_axis_zero; -// mod concat_from_sequence_i32_new_axis_one; -// mod concat_from_sequence_i32_new_axis_default; -// mod concat_from_sequence_i8_new_axis_zero; -// mod concat_from_sequence_i8_new_axis_one; -// mod concat_from_sequence_i8_new_axis_default; -// mod concat_from_sequence_u32_new_axis_zero; -// mod concat_from_sequence_u32_new_axis_one; -// mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_nan_fp8x23; -// mod is_inf_fp16x16; -// mod is_inf_fp8x23; -// mod is_inf_i32; -// mod is_inf_i8; -// mod is_inf_u32; -// mod is_pos_inf_fp16x16; -// mod is_neg_inf_fp16x16; -// mod is_pos_inf_fp8x23; -// mod is_neg_inf_fp8x23; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; -// mod is_pos_inf_i8; -// mod is_neg_inf_i8; -// mod reduce_log_sum_fp8x23_export_do_not_keepdims; -// mod reduce_log_sum_fp8x23_export_keepdims; -// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -// mod reduce_log_sum_fp16x16_export_do_not_keepdims; -// mod reduce_log_sum_fp16x16_export_keepdims; -// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -// mod and_bool; -// mod erf_fp16x16; -// mod erf_fp8x23; -// mod unique_fp16x16_without_axis_sorted; -// mod unique_fp16x16_with_axis_zero_sorted; -// mod unique_u32_without_axis_sorted; -// mod unique_u32_without_axis_not_sorted; -// mod unique_u32_with_axis_zero_sorted; -// mod unique_u32_with_axis_zero_not_sorted; -// mod unique_u32_with_axis_one_sorted; -// mod unique_u32_with_axis_one_not_sorted; -// mod gather_nd_fp16x16_3d_default; -// mod gather_nd_fp16x16_3d_batch_dims1; -// mod gather_nd_fp16x16_3d_batch_dims2; -// mod gather_nd_fp8x23_3d_default; -// mod gather_nd_fp8x23_3d_batch_dims1; -// mod gather_nd_fp8x23_3d_batch_dims2; -// mod gather_nd_i32_3d_default; -// mod gather_nd_i32_3d_batch_dims1; -// mod gather_nd_i32_3d_batch_dims2; -// mod gather_nd_i8_3d_default; -// mod gather_nd_i8_3d_batch_dims1; -// mod gather_nd_u32_default; -// mod 
gather_nd_u32_batch_dims1; -// mod gather_nd_u32_batch_dims2; -// mod resize_upsample_scales_nearest; -// mod resize_downsample_scales_cubic; -// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_downsample_scales_cubic_align_corners; -// mod resize_upsample_scales_linear; -// mod resize_downsample_scales_linear_align_corners; -// mod resize_downsample_scales_nearest; -// mod resize_upsample_scales_cubic; -// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_upsample_scales_cubic_align_corners; -// mod resize_upsample_scales_cubic_asymmetric; -// mod resize_upsample_scales_linear_align_corners; -// mod resize_upsample_sizes_nearest; -// mod resize_upsample_sizes_cubic; -// mod resize_downsample_sizes_cubic; -// mod resize_downsample_sizes_nearest; -// mod resize_upsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_scales_cubic_antialias; -// mod resize_downsample_scales_linear_antialias; -// mod resize_downsample_sizes_cubic_antialias; -// mod resize_downsample_sizes_linear_pytorch_half_pixel; -// mod resize_tf_crop_and_resize; -// mod resize_tf_crop_and_resize_extrapolation_value; -// mod resize_upsample_scales_nearest_axes_2_3; -// mod resize_upsample_scales_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_2_3; -// mod resize_upsample_sizes_nearest_ceil_half_pixel; -// mod resize_upsample_sizes_nearest_floor_align_corners; -// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -// mod resize_downsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_sizes_nearest_not_larger; -// mod resize_downsample_sizes_nearest_not_smaller; -// mod resize_tf_crop_and_resize_axes_2_3; -// mod resize_tf_crop_and_resize_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_not_larger; -// mod resize_upsample_sizes_nearest_not_smaller; -// mod compress_fp16x16_3d_default; -// mod compress_fp16x16_3d_axis1; -// mod 
compress_fp16x16_3d_axis2; -// mod compress_fp16x16_3d_axis3; -// mod compress_fp16x16_3d_noaxis; -// mod compress_fp8x23_3d_default; -// mod compress_fp8x23_3d_axis1; -// mod compress_fp8x23_3d_axis2; -// mod compress_i32_3d_default; -// mod compress_i32_3d_axis1; -// mod compress_i32_3d_axis2; -// mod compress_i8_3d_default; -// mod compress_i8_3d_axis1; -// mod compress_i8_3d_axis2; -// mod compress_u32_3d_default; -// mod compress_u32_3d_axis1; -// mod compress_u32_3d_axis2; -// mod compress_u32_3d_axis2_2; -// mod compress_u32_3d_axis3; -// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -// mod layer_normalization_default_axis; -// mod layer_normalization_4d_axis0; -// mod layer_normalization_4d_axis1; -// mod layer_normalization_4d_axis2; -// mod layer_normalization_4d_axis3; -// mod layer_normalization_3d_axis0_epsilon; -// mod layer_normalization_3d_axis_negative_3_epsilon; -// mod layer_normalization_3d_axis1_epsilon; -// mod layer_normalization_3d_axis2_epsilon; -// mod layer_normalization_4d_axis_negative_4; -// mod layer_normalization_4d_axis_negative_3; -// mod layer_normalization_4d_axis_negative_2; -// mod layer_normalization_4d_axis_negative_1; -// mod layer_normalization_3d_axis_negative_2_epsilon; -// mod layer_normalization_3d_axis_negative_1_epsilon; -// mod layer_normalization_test; -// mod split_u32_1d_equal_parts; -// mod split_u32_2d_equal_parts; -// mod split_u32_zero_size; -// mod split_u32_1d_variable_parts; -// mod split_u32_2d_variable_parts; -// mod split_u32_1d_uneven; -// mod split_u32_2d_uneven; -// mod split_fp16x16_1d_equal_parts; -// mod split_fp16x16_1d_variable_parts; -// mod split_fp16x16_2d_equal_parts; -// mod split_fp16x16_2d_variable_parts; -// mod split_fp16x16_zero_size; -// mod split_fp16x16_1d_uneven; -// mod split_fp16x16_2d_uneven; -// mod grid_sample; -// mod grid_sample_cubic; -// mod 
grid_sample_aligncorners; -// mod grid_sample_nearest; -// mod grid_sample_nearest_aligncorner; -// mod grid_sample_padding_border; -// mod grid_sample_padding_reflection; -// mod grid_sample_padding_zeros; -// mod col2im; -// mod col2im_5D; -// mod col2im_dilations; -// mod col2im_pads; -// mod col2im_strides; -// mod random_uniform_like_fp16x16; -// mod random_uniform_like_fp8x23; -// mod range_fp8x23; -// mod range_fp16x16; -// mod range_i32; -// mod range_i8; -// mod range_u32; -// mod hann_window_fp8x23; -// mod hann_window_fp16x16; -// mod hamming_window_fp16x16; -// mod hamming_window_fp8x23; -// mod blackman_window_fp16x16; -// mod blackman_window_fp8x23; -// mod split_to_sequence_fp16x16_1d_equal_parts; -// mod split_to_sequence_fp16x16_1d_variable_parts; -// mod split_to_sequence_fp16x16_2d_equal_parts; -// mod split_to_sequence_fp16x16_2d_variable_parts; -// mod split_to_sequence_fp16x16_zero_size; -// mod split_to_sequence_fp16x16_1d_uneven; -// mod split_to_sequence_fp16x16_2d_uneven; -// mod split_to_sequence_u32_1d_equal_parts; -// mod split_to_sequence_u32_1d_variable_parts; -// mod split_to_sequence_u32_2d_equal_parts; -// mod split_to_sequence_u32_2d_variable_parts; -// mod split_to_sequence_u32_zero_size; -// mod split_to_sequence_u32_1d_uneven; -// mod split_to_sequence_u32_2d_uneven; -// mod split_to_sequence_2d_scalar; -// mod split_to_sequence_2d_nokeepdims; -// mod split_to_sequence_1d_nokeepdims; -// mod reverse_sequence_fp16x16_batch_equal_parts; -// mod reverse_sequence_fp16x16_time_equal_parts; -// mod reverse_sequence_i32_batch_equal_parts; -// mod reverse_sequence_i32_time_equal_parts; -// mod reverse_sequence_i8_batch_equal_parts; -// mod reverse_sequence_i8_time_equal_parts; -// mod reverse_sequence_u32_4x4_batch; -// mod reverse_sequence_u32_4x4_time; -// mod reverse_sequence_u32_3x3_batch; -// mod reverse_sequence_u32_3x3_time; -// mod reverse_sequence_different_dimensions_4_5; -// mod reverse_sequence_different_dimensions_2_4; -// 
mod reverse_sequence_different_dimensions_1_6; -// mod reverse_sequence_different_dimensions_3x9_batch; -// mod reverse_sequence_different_dimensions_3x9_time; -// mod conv_transpose; -// mod conv_transpose_1d; -// mod conv_transpose_3d; -// mod conv_transpose_attributes; -// mod conv_transpose_autopad_same; -// mod conv_transpose_dilations; -// mod conv_transpose_pads; -// mod conv_transpose_group_2; -// mod conv_transpose_group_2_image_3; -// mod depth_to_space_fp16x16; -// mod depth_to_space_fp8x23; -// mod depth_to_space_i32; -// mod depth_to_space_i8; -// mod depth_to_space_u32; -// mod space_to_depth_fp16x16; -// mod space_to_depth_fp8x23; -// mod space_to_depth_i32; -// mod space_to_depth_i8; -// mod space_to_depth_u32; -// mod scatter_nd_fp16x16_3d_default; -// mod scatter_nd_fp16x16_3d_add; -// mod scatter_nd_fp16x16_3d_mul; -// mod scatter_nd_fp16x16_3d_max; -// mod scatter_nd_fp16x16_3d_min; -// mod scatter_nd_fp8x23_3d_default; -// mod scatter_nd_fp8x23_3d_add; -// mod scatter_nd_fp8x23_3d_mul; -// mod scatter_nd_fp8x23_3d_max; -// mod scatter_nd_fp8x23_3d_min; -// mod scatter_nd_u32_default; -// mod scatter_nd_u32_add; -// mod scatter_nd_u32_mul; -// mod scatter_nd_u32_max; -// mod scatter_nd_u32_min; -// mod conv_2D_with_padding; -// mod conv_1D_no_padding; -// mod conv_1D_with_padding; -// mod conv_3D_no_padding; -// mod conv_3D_with_padding; -// mod conv_4D_no_padding; -// mod conv_2D_with_2_groups; -// mod conv_2D_with_autopad_same; -// mod conv_2D_with_strides_asymmetric_padding; -// mod conv_2D_with_strides_with_padding; -// mod conv_4D_with_padding; -// mod label_encoder_fp16x16_3d_default; -// mod label_encoder_fp8x23_default; -// mod label_encoder_i8_default; -// mod label_encoder_i32_default; -// mod label_encoder_u32_default; +mod abs_fp16x16; +mod abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; +mod add_fp16x16_broadcast; +mod add_fp8x23; +mod 
add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod concat_fp16x16_3d_three_tensors_axis_2; +mod 
concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; +mod div_i8; +mod div_i8_broadcast; +mod 
div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod relu_fp16x16; +mod relu_fp8x23; +mod relu_i32; +mod 
relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; +mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod unsqueeze_u32_2d; +mod unsqueeze_u32_3d; 
+mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod 
max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; +mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod 
tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; +mod reduce_l2_fp16x16_export_keepdims; +mod 
reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod sequence_at_fp8x23_positive; +mod 
sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod sequence_erase_u32_empty; +mod 
sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod 
unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod 
resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +mod reduce_log_sum_exp_fp32x32_export_keepdims; +mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod 
split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod 
reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; +mod label_encoder_fp16x16_3d_default; +mod label_encoder_fp8x23_default; +mod label_encoder_i8_default; +mod label_encoder_i32_default; +mod label_encoder_u32_default; mod gather_fp16x16_3d_default; mod gather_fp16x16_3d_axis1; mod gather_fp16x16_3d_axis2; From 
f9d0603bde0244887b956507fb777f9fbe15dd81 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 09:50:14 +0100 Subject: [PATCH 57/68] update tests --- nodegen/node/less.py | 20 +- tests/lib.cairo | 12 +- tests/nodes.cairo | 1954 ++++++++--------- tests/nodes/less_fp16x16.cairo | 8 +- tests/nodes/less_fp16x16/input_0.cairo | 24 +- tests/nodes/less_fp16x16/input_1.cairo | 24 +- tests/nodes/less_fp16x16/output_0.cairo | 59 +- tests/nodes/less_fp16x16_broadcast.cairo | 8 +- .../less_fp16x16_broadcast/input_0.cairo | 28 +- .../less_fp16x16_broadcast/input_1.cairo | 4 +- .../less_fp16x16_broadcast/output_0.cairo | 59 +- tests/nodes/less_fp8x23.cairo | 10 +- tests/nodes/less_fp8x23/input_0.cairo | 32 +- tests/nodes/less_fp8x23/input_1.cairo | 28 +- tests/nodes/less_fp8x23/output_0.cairo | 59 +- tests/nodes/less_fp8x23_broadcast.cairo | 10 +- .../nodes/less_fp8x23_broadcast/input_0.cairo | 24 +- .../nodes/less_fp8x23_broadcast/input_1.cairo | 6 +- .../less_fp8x23_broadcast/output_0.cairo | 59 +- tests/nodes/less_i32.cairo | 8 +- tests/nodes/less_i32/input_0.cairo | 28 +- tests/nodes/less_i32/input_1.cairo | 28 +- tests/nodes/less_i32/output_0.cairo | 59 +- tests/nodes/less_i32_broadcast.cairo | 8 +- tests/nodes/less_i32_broadcast/input_0.cairo | 30 +- tests/nodes/less_i32_broadcast/input_1.cairo | 4 +- tests/nodes/less_i32_broadcast/output_0.cairo | 59 +- tests/nodes/less_i8.cairo | 8 +- tests/nodes/less_i8/input_0.cairo | 30 +- tests/nodes/less_i8/input_1.cairo | 28 +- tests/nodes/less_i8/output_0.cairo | 59 +- tests/nodes/less_i8_broadcast.cairo | 8 +- tests/nodes/less_i8_broadcast/input_0.cairo | 24 +- tests/nodes/less_i8_broadcast/input_1.cairo | 4 +- tests/nodes/less_i8_broadcast/output_0.cairo | 59 +- tests/nodes/less_u32.cairo | 8 +- tests/nodes/less_u32/input_0.cairo | 22 +- tests/nodes/less_u32/input_1.cairo | 30 +- tests/nodes/less_u32/output_0.cairo | 59 +- tests/nodes/less_u32_broadcast.cairo | 8 +- tests/nodes/less_u32_broadcast/input_0.cairo | 26 
+- tests/nodes/less_u32_broadcast/input_1.cairo | 4 +- tests/nodes/less_u32_broadcast/output_0.cairo | 59 +- 43 files changed, 1547 insertions(+), 1541 deletions(-) diff --git a/nodegen/node/less.py b/nodegen/node/less.py index 14af93201..452ea2732 100644 --- a/nodegen/node/less.py +++ b/nodegen/node/less.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_u32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_u32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i32" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i32_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i8" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_i8_broadcast" 
make_test([x, y], z, "input_0.less(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp8x23" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp8x23_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -135,7 +135,7 @@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp16x16" make_test([x, y], z, "input_0.less(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.BOOL, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_fp16x16_broadcast" make_test([x, y], z, "input_0.less(@input_1)", name) diff --git a/tests/lib.cairo b/tests/lib.cairo index eb58139db..f5cecb77d 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -1,7 +1,7 @@ -// mod numbers; -// mod performance; -// mod tensor_core; -// mod nodes; -// mod ml; -// mod operators; +mod numbers; +mod performance; +mod tensor_core; +mod nodes; +mod ml; +mod operators; diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 244d8b0c9..15249bc0d 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,950 +1,950 @@ -mod abs_fp16x16; -mod abs_fp8x23; -mod abs_i32; -mod abs_i8; -mod acos_fp16x16; -mod acos_fp8x23; -mod acosh_fp16x16; -mod acosh_fp8x23; -mod add_fp16x16; -mod add_fp16x16_broadcast; -mod add_fp8x23; 
-mod add_fp8x23_broadcast; -mod add_i32; -mod add_i32_broadcast; -mod add_i8; -mod add_i8_broadcast; -mod add_u32; -mod add_u32_broadcast; -mod argmin_fp16x16_1D_default; -mod argmin_fp16x16_1D_keepdims_false; -mod argmin_fp16x16_1D_last_index; -mod argmin_fp16x16_2D_default; -mod argmin_fp16x16_2D_keepdims_false; -mod argmin_fp16x16_2D_last_index; -mod argmin_fp16x16_3D_default; -mod argmin_fp16x16_3D_keepdims_false; -mod argmin_fp16x16_3D_last_index; -mod argmin_fp8x23_1D_default; -mod argmin_fp8x23_1D_keepdims_false; -mod argmin_fp8x23_1D_last_index; -mod argmin_fp8x23_2D_default; -mod argmin_fp8x23_2D_keepdims_false; -mod argmin_fp8x23_2D_last_index; -mod argmin_fp8x23_3D_default; -mod argmin_fp8x23_3D_keepdims_false; -mod argmin_fp8x23_3D_last_index; -mod argmin_i32_1D_default; -mod argmin_i32_1D_keepdims_false; -mod argmin_i32_1D_last_index; -mod argmin_i32_2D_default; -mod argmin_i32_2D_keepdims_false; -mod argmin_i32_2D_last_index; -mod argmin_i32_3D_default; -mod argmin_i32_3D_keepdims_false; -mod argmin_i32_3D_last_index; -mod argmin_i8_1D_default; -mod argmin_i8_1D_keepdims_false; -mod argmin_i8_1D_last_index; -mod argmin_i8_2D_default; -mod argmin_i8_2D_keepdims_false; -mod argmin_i8_2D_last_index; -mod argmin_i8_3D_default; -mod argmin_i8_3D_keepdims_false; -mod argmin_i8_3D_last_index; -mod argmin_u32_1D_default; -mod argmin_u32_1D_keepdims_false; -mod argmin_u32_1D_last_index; -mod argmin_u32_2D_default; -mod argmin_u32_2D_keepdims_false; -mod argmin_u32_2D_last_index; -mod argmin_u32_3D_default; -mod argmin_u32_3D_keepdims_false; -mod argmin_u32_3D_last_index; -mod asin_fp16x16; -mod asin_fp8x23; -mod asinh_fp16x16; -mod asinh_fp8x23; -mod atan_fp16x16; -mod atan_fp8x23; -mod ceil_fp16x16; -mod ceil_fp8x23; -mod concat_fp16x16_1d; -mod concat_fp16x16_2d; -mod concat_fp16x16_3d_default; -mod concat_fp16x16_3d_axis_1; -mod concat_fp16x16_3d_axis_2; -mod concat_fp16x16_3d_three_tensors_axis_1; -mod concat_fp16x16_3d_three_tensors_axis_2; -mod 
concat_fp8x23_1d; -mod concat_fp8x23_2d; -mod concat_fp8x23_3d_default; -mod concat_fp8x23_3d_axis_1; -mod concat_fp8x23_3d_axis_2; -mod concat_fp8x23_3d_three_tensors_axis_1; -mod concat_fp8x23_3d_three_tensors_axis_2; -mod concat_i32_1d; -mod concat_i32_2d; -mod concat_i32_3d_default; -mod concat_i32_3d_axis_1; -mod concat_i32_3d_axis_2; -mod concat_i32_3d_three_tensors_axis_1; -mod concat_i32_3d_three_tensors_axis_2; -mod concat_i8_1d; -mod concat_i8_2d; -mod concat_i8_3d_default; -mod concat_i8_3d_axis_1; -mod concat_i8_3d_axis_2; -mod concat_i8_3d_three_tensors_axis_1; -mod concat_i8_3d_three_tensors_axis_2; -mod concat_u32_1d; -mod concat_u32_2d; -mod concat_u32_3d_default; -mod concat_u32_3d_axis_1; -mod concat_u32_3d_axis_2; -mod concat_u32_3d_three_tensors_axis_1; -mod concat_u32_3d_three_tensors_axis_2; -mod cos_fp16x16; -mod cos_fp8x23; -mod cosh_fp16x16; -mod cosh_fp8x23; -mod cumsum_fp16x16_1d_default; -mod cumsum_fp16x16_1d_exclusive; -mod cumsum_fp16x16_1d_reverse; -mod cumsum_fp16x16_1d_reverse_exclusive; -mod cumsum_fp16x16_2d_axis_0; -mod cumsum_fp16x16_2d_axis_1; -mod cumsum_fp8x23_1d_default; -mod cumsum_fp8x23_1d_exclusive; -mod cumsum_fp8x23_1d_reverse; -mod cumsum_fp8x23_1d_reverse_exclusive; -mod cumsum_fp8x23_2d_axis_0; -mod cumsum_fp8x23_2d_axis_1; -mod cumsum_i32_1d_default; -mod cumsum_i32_1d_exclusive; -mod cumsum_i32_1d_reverse; -mod cumsum_i32_1d_reverse_exclusive; -mod cumsum_i32_2d_axis_0; -mod cumsum_i32_2d_axis_1; -mod cumsum_i8_1d_default; -mod cumsum_i8_1d_exclusive; -mod cumsum_i8_1d_reverse; -mod cumsum_i8_1d_reverse_exclusive; -mod cumsum_i8_2d_axis_0; -mod cumsum_i8_2d_axis_1; -mod cumsum_u32_1d_default; -mod cumsum_u32_1d_exclusive; -mod cumsum_u32_1d_reverse; -mod cumsum_u32_1d_reverse_exclusive; -mod cumsum_u32_2d_axis_0; -mod cumsum_u32_2d_axis_1; -mod div_fp16x16; -mod div_fp16x16_broadcast; -mod div_fp8x23; -mod div_fp8x23_broadcast; -mod div_i32; -mod div_i32_broadcast; -mod div_i8; -mod div_i8_broadcast; -mod 
div_u32; -mod div_u32_broadcast; -mod equal_fp16x16; -mod equal_fp16x16_broadcast; -mod equal_fp8x23; -mod equal_fp8x23_broadcast; -mod equal_i32; -mod equal_i32_broadcast; -mod equal_i8; -mod equal_i8_broadcast; -mod equal_u32; -mod equal_u32_broadcast; -mod exp_fp16x16; -mod exp_fp8x23; -mod less_equal_fp16x16; -mod less_equal_fp16x16_broadcast; -mod less_equal_fp8x23; -mod less_equal_fp8x23_broadcast; -mod less_equal_i32; -mod less_equal_i32_broadcast; -mod less_equal_i8; -mod less_equal_i8_broadcast; -mod less_equal_u32; -mod less_equal_u32_broadcast; -mod greater_fp16x16; -mod greater_fp16x16_broadcast; -mod greater_fp8x23; -mod greater_fp8x23_broadcast; -mod greater_i32; -mod greater_i32_broadcast; -mod greater_i8; -mod greater_i8_broadcast; -mod greater_u32; -mod greater_u32_broadcast; -mod leaky_relu_fp16x16; -mod leaky_relu_fp8x23; -mod linear_fp16x16; -mod linear_fp8x23; -mod linear_i32; -mod linear_i8; -mod linear_u32; -mod log_fp16x16; -mod log_fp8x23; -mod logsoftmax_fp16x16_axis_0; -mod logsoftmax_fp16x16_axis_1; -mod logsoftmax_fp8x23_axis_0; -mod logsoftmax_fp8x23_axis_1; -mod matmul_fp16x16_1d; -mod matmul_fp16x16_2x2; -mod matmul_fp16x16_2x1; -mod matmul_fp16x16_1x2; -mod matmul_fp8x23_1d; -mod matmul_fp8x23_2x2; -mod matmul_fp8x23_2x1; -mod matmul_fp8x23_1x2; -mod matmul_i32_1d; -mod matmul_i32_2x2; -mod matmul_i32_2x1; -mod matmul_i32_1x2; -mod matmul_i8_1d; -mod matmul_i8_2x2; -mod matmul_i8_2x1; -mod matmul_i8_1x2; -mod matmul_u32_1d; -mod matmul_u32_2x2; -mod matmul_u32_2x1; -mod matmul_u32_1x2; -mod mul_fp16x16; -mod mul_fp16x16_broadcast; -mod mul_fp8x23; -mod mul_fp8x23_broadcast; -mod mul_i32; -mod mul_i32_broadcast; -mod mul_i8; -mod mul_i8_broadcast; -mod mul_u32; -mod mul_u32_broadcast; -mod or_fp16x16; -mod or_fp16x16_broadcast; -mod or_fp8x23; -mod or_fp8x23_broadcast; -mod or_i32; -mod or_i32_broadcast; -mod or_i8; -mod or_i8_broadcast; -mod or_u32; -mod or_u32_broadcast; -mod relu_fp16x16; -mod relu_fp8x23; -mod relu_i32; -mod 
relu_i8; -mod sigmoid_fp16x16; -mod sigmoid_fp8x23; -mod sin_fp16x16; -mod sin_fp8x23; -mod sinh_fp16x16; -mod sinh_fp8x23; -mod softplus_fp8x23; -mod softplus_fp16x16; -mod softsign_fp8x23; -mod softsign_fp16x16; -mod sqrt_fp16x16; -mod sqrt_fp8x23; -mod sub_fp16x16; -mod sub_fp16x16_broadcast; -mod sub_fp8x23; -mod sub_fp8x23_broadcast; -mod sub_i32; -mod sub_i32_broadcast; -mod sub_i8; -mod sub_i8_broadcast; -mod sub_u32; -mod sub_u32_broadcast; -mod tanh_fp16x16; -mod tanh_fp8x23; -mod transpose_fp16x16_2d; -mod transpose_fp16x16_3d; -mod transpose_fp8x23_2d; -mod transpose_fp8x23_3d; -mod transpose_i32_2d; -mod transpose_i32_3d; -mod transpose_i8_2d; -mod transpose_i8_3d; -mod transpose_u32_2d; -mod transpose_u32_3d; -mod xor_fp16x16; -mod xor_fp16x16_broadcast; -mod xor_fp8x23; -mod xor_fp8x23_broadcast; -mod xor_i32; -mod xor_i32_broadcast; -mod xor_i8; -mod xor_i8_broadcast; -mod xor_u32; -mod xor_u32_broadcast; -mod greater_equal_fp16x16; -mod greater_equal_fp16x16_broadcast; -mod greater_equal_fp8x23; -mod greater_equal_fp8x23_broadcast; -mod greater_equal_i32; -mod greater_equal_i32_broadcast; -mod greater_equal_i8; -mod greater_equal_i8_broadcast; -mod greater_equal_u32; -mod greater_equal_u32_broadcast; -mod slice_fp16x16_2d; -mod slice_fp16x16_3d; -mod slice_fp8x23_2d; -mod slice_fp8x23_3d; -mod slice_i32_2d; -mod slice_i32_3d; -mod slice_i8_2d; -mod slice_i8_3d; -mod slice_u32_2d; -mod slice_u32_3d; -mod nonzero_fp16x16_2d; -mod nonzero_fp16x16_3d; -mod nonzero_fp8x23_2d; -mod nonzero_fp8x23_3d; -mod nonzero_i32_2d; -mod nonzero_i32_3d; -mod nonzero_i8_2d; -mod nonzero_i8_3d; -mod nonzero_u32_2d; -mod nonzero_u32_3d; -mod squeeze_fP16x16; -mod squeeze_fP8x23; -mod squeeze_i32; -mod squeeze_i8; -mod squeeze_u32; -mod unsqueeze_fp16x16_2d; -mod unsqueeze_fp16x16_3d; -mod unsqueeze_fp8x23_2d; -mod unsqueeze_fp8x23_3d; -mod unsqueeze_i32_2d; -mod unsqueeze_i32_3d; -mod unsqueeze_i8_2d; -mod unsqueeze_i8_3d; -mod unsqueeze_u32_2d; -mod unsqueeze_u32_3d; 
-mod sign_fP16x16; -mod sign_fP8x23; -mod sign_fail; -mod sign_i32; -mod sign_i8; -mod clip_fp16x16_2d; -mod clip_fp16x16_3d; -mod clip_fp8x23_2d; -mod clip_fp8x23_3d; -mod clip_i32_2d; -mod clip_i32_3d; -mod clip_i8_2d; -mod clip_i8_3d; -mod clip_u32_2d; -mod clip_u32_3d; -mod identity_fP16x16; -mod identity_fP8x23; -mod identity_i32; -mod identity_i8; -mod identity_u32; -mod thresholded_relu_fp16x16; -mod thresholded_relu_fp8x23; -mod hard_sigmoid_fp8x23; -mod hard_sigmoid_fp16x16; -mod neg_fp16x16; -mod neg_fp8x23; -mod neg_i32; -mod neg_i8; -mod gemm_all_attributes; -mod gemm_alpha; -mod gemm_beta; -mod gemm_default_matrix_bias; -mod gemm_default_vector_bias; -mod gemm_default_no_bias; -mod gemm_transposeA; -mod gemm_transposeB; -mod min_fp16x16_three_tensors; -mod min_fp16x16_broadcast_three_tensors; -mod min_fp16x16_two_tensors; -mod min_fp16x16_broadcast_two_tensors; -mod min_fp8x23_three_tensors; -mod min_fp8x23_broadcast_three_tensors; -mod min_fp8x23_two_tensors; -mod min_fp8x23_broadcast_two_tensors; -mod min_i32_three_tensors; -mod min_i32_broadcast_three_tensors; -mod min_i32_two_tensors; -mod min_i32_broadcast_two_tensors; -mod min_i8_three_tensors; -mod min_i8_broadcast_three_tensors; -mod min_i8_two_tensors; -mod min_i8_broadcast_two_tensors; -mod min_u32_three_tensors; -mod min_u32_broadcast_three_tensors; -mod min_u32_two_tensors; -mod min_u32_broadcast_two_tensors; -mod where_fp16x16; -mod where_fp16x16_broadcast; -mod where_fp8x23; -mod where_fp8x23_broadcast; -mod where_i32; -mod where_i32_broadcast; -mod where_i8; -mod where_i8_broadcast; -mod where_u32; -mod where_u32_broadcast; -mod not_bool; -mod round_fp16x16; -mod round_fp8x23; -mod max_fp16x16_three_tensors; -mod max_fp16x16_broadcast_three_tensors; -mod max_fp16x16_two_tensors; -mod max_fp16x16_broadcast_two_tensors; -mod max_fp8x23_three_tensors; -mod max_fp8x23_broadcast_three_tensors; -mod max_fp8x23_two_tensors; -mod max_fp8x23_broadcast_two_tensors; -mod max_i32_three_tensors; -mod 
max_i32_broadcast_three_tensors; -mod max_i32_two_tensors; -mod max_i32_broadcast_two_tensors; -mod max_i8_three_tensors; -mod max_i8_broadcast_three_tensors; -mod max_i8_two_tensors; -mod max_i8_broadcast_two_tensors; -mod max_u32_three_tensors; -mod max_u32_broadcast_three_tensors; -mod max_u32_two_tensors; -mod max_u32_broadcast_two_tensors; -mod scatter_fp16x16_3d_default; -mod scatter_fp16x16_3d_axis1; -mod scatter_fp16x16_3d_axis1_add; -mod scatter_fp8x23_default; -mod scatter_fp8x23_axis1; -mod scatter_fp8x23_mul; -mod scatter_i8_default; -mod scatter_i8_axis1; -mod scatter_i8_axis1_max; -mod scatter_u32_default; -mod scatter_u32_axis1; -mod scatter_u32_add; -mod array_feature_extractor_1D_i32; -mod array_feature_extractor_1D_fp8x23; -mod array_feature_extractor_1D_fp16x16; -mod array_feature_extractor_2D_i32; -mod array_feature_extractor_2D_fp8x23; -mod array_feature_extractor_2D_fp16x16; -mod array_feature_extractor_3D_i32; -mod array_feature_extractor_3D_fp8x23; -mod array_feature_extractor_3D_fp16x16; -mod binarizer_fp16x16; -mod binarizer_fp8x23; -mod tril_fp16x16; -mod tril_fp16x16_neg; -mod tril_fp16x16_one_row; -mod tril_fp16x16_out_neg; -mod tril_fp16x16_out_pos; -mod tril_fp16x16_pos; -mod tril_fp16x16_square; -mod tril_fp16x16_square_neg; -mod tril_fp16x16_zero; -mod triu_fp16x16; -mod triu_fp16x16_neg; -mod triu_fp16x16_one_row; -mod triu_fp16x16_out_neg; -mod triu_fp16x16_out_pos; -mod triu_fp16x16_pos; -mod triu_fp16x16_square; -mod triu_fp16x16_square_neg; -mod triu_fp16x16_zero; -mod tril_fp8x23; -mod tril_fp8x23_neg; -mod tril_fp8x23_one_row; -mod tril_fp8x23_out_neg; -mod tril_fp8x23_out_pos; -mod tril_fp8x23_pos; -mod tril_fp8x23_square; -mod tril_fp8x23_square_neg; -mod tril_fp8x23_zero; -mod triu_fp8x23; -mod triu_fp8x23_neg; -mod triu_fp8x23_one_row; -mod triu_fp8x23_out_neg; -mod triu_fp8x23_out_pos; -mod triu_fp8x23_pos; -mod triu_fp8x23_square; -mod triu_fp8x23_square_neg; -mod triu_fp8x23_zero; -mod tril_i32; -mod tril_neg_i32; -mod 
tril_i32_one_row; -mod tril_i32_out_neg; -mod tril_i32_out_pos; -mod tril_i32_pos; -mod tril_i32_square; -mod tril_i32_square_neg; -mod tril_i32_zero; -mod triu_i32; -mod triu_i32_neg; -mod triu_i32_one_row; -mod triu_i32_out_neg; -mod triu_i32_out_pos; -mod triu_i32_pos; -mod triu_i32_square; -mod triu_i32_square_neg; -mod triu_i32_zero; -mod tril_i8; -mod tril_i8_neg; -mod tril_i8_one_row; -mod tril_i8_out_neg; -mod tril_i8_out_pos; -mod tril_i8_pos; -mod tril_i8_square; -mod tril_i8_square_neg; -mod tril_i8_zero; -mod triu_i8; -mod triu_i8_neg; -mod triu_i8_one_row; -mod triu_i8_out_neg; -mod triu_i8_out_pos; -mod triu_i8_pos; -mod triu_i8_square; -mod triu_i8_square_neg; -mod triu_i8_zero; -mod tril_u32; -mod tril_u32_neg; -mod tril_u32_one_row; -mod tril_u32_out_neg; -mod tril_u32_out_pos; -mod tril_u32_pos; -mod tril_u32_square; -mod tril_u32_square_neg; -mod tril_u32_zero; -mod triu_u32; -mod triu_u32_neg; -mod triu_u32_one_row; -mod triu_u32_out_neg; -mod triu_u32_out_pos; -mod triu_u32_pos; -mod triu_u32_square; -mod triu_u32_square_neg; -mod triu_u32_zero; -mod reduce_sum_square_fp16x16_export_do_not_keepdims; -mod reduce_sum_square_fp16x16_export_keepdims; -mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -mod reduce_sum_square_fp8x23_export_do_not_keepdims; -mod reduce_sum_square_fp8x23_export_keepdims; -mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; -mod reduce_sum_square_i32_export_do_not_keepdims; -mod reduce_sum_square_i32_export_keepdims; -mod reduce_sum_square_i32_export_negative_axes_keepdims; -mod reduce_sum_square_i8_export_do_not_keepdims; -mod reduce_sum_square_i8_export_keepdims; -mod reduce_sum_square_i8_export_negative_axes_keepdims; -mod reduce_sum_square_u32_export_do_not_keepdims; -mod reduce_sum_square_u32_export_keepdims; -mod reduce_sum_square_u32_export_negative_axes_keepdims; -mod reduce_l2_fp16x16_export_do_not_keepdims; -mod reduce_l2_fp16x16_export_keepdims; -mod 
reduce_l2_fp16x16_export_negative_axes_keepdims; -mod reduce_l2_fp8x23_export_do_not_keepdims; -mod reduce_l2_fp8x23_export_keepdims; -mod reduce_l2_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_fp16x16_export_do_not_keepdims; -mod reduce_l1_fp16x16_export_keepdims; -mod reduce_l1_fp16x16_export_negative_axes_keepdims; -mod reduce_l1_fp8x23_export_do_not_keepdims; -mod reduce_l1_fp8x23_export_keepdims; -mod reduce_l1_fp8x23_export_negative_axes_keepdims; -mod reduce_l1_i32_export_do_not_keepdims; -mod reduce_l1_i32_export_keepdims; -mod reduce_l1_i32_export_negative_axes_keepdims; -mod reduce_l1_i8_export_do_not_keepdims; -mod reduce_l1_i8_export_keepdims; -mod reduce_l1_i8_export_negative_axes_keepdims; -mod reduce_l1_u32_export_do_not_keepdims; -mod reduce_l1_u32_export_keepdims; -mod reduce_l1_u32_export_negative_axes_keepdims; -mod reduce_prod_fp16x16_1D; -mod reduce_prod_fp16x16_2D_default; -mod reduce_prod_fp16x16_2D_keepdims; -mod reduce_prod_fp16x16_2D_axis_1; -mod reduce_prod_fp8x23_1D; -mod reduce_prod_fp8x23_2D_default; -mod reduce_prod_fp8x23_2D_keepdims; -mod reduce_prod_fp8x23_2D_axis_1; -mod reduce_prod_i32_1D; -mod reduce_prod_i32_2D_default; -mod reduce_prod_i32_2D_keepdims; -mod reduce_prod_i32_2D_axis_1; -mod reduce_prod_i8_1D; -mod reduce_prod_i8_2D_default; -mod reduce_prod_i8_2D_keepdims; -mod reduce_prod_i8_2D_axis_1; -mod reduce_prod_u32_1D; -mod reduce_prod_u32_2D_default; -mod reduce_prod_u32_2D_keepdims; -mod reduce_prod_u32_2D_axis_1; -mod sequence_length_fp16x16; -mod sequence_length_fp16x16_broadcast; -mod sequence_length_fp8x23; -mod sequence_length_fp8x23_broadcast; -mod sequence_length_i32; -mod sequence_length_i32_broadcast; -mod sequence_length_i8; -mod sequence_length_i8_broadcast; -mod sequence_length_u32; -mod sequence_length_u32_broadcast; -mod sequence_at_u32_positive; -mod sequence_at_u32_negative; -mod sequence_at_fp16x16_positive; -mod sequence_at_fp16x16_negative; -mod sequence_at_fp8x23_positive; -mod 
sequence_at_fp8x23_negative; -mod sequence_at_i32_positive; -mod sequence_at_i32_negative; -mod sequence_at_i8_positive; -mod sequence_at_i8_negative; -mod reduce_min_fp16x16_1D; -mod reduce_min_fp16x16_2D_default; -mod reduce_min_fp16x16_2D_keepdims; -mod reduce_min_fp16x16_2D_axis_1; -mod reduce_min_fp8x23_1D; -mod reduce_min_fp8x23_2D_default; -mod reduce_min_fp8x23_2D_keepdims; -mod reduce_min_fp8x23_2D_axis_1; -mod reduce_min_i32_1D; -mod reduce_min_i32_2D_default; -mod reduce_min_i32_2D_keepdims; -mod reduce_min_i32_2D_axis_1; -mod reduce_min_i8_1D; -mod reduce_min_i8_2D_default; -mod reduce_min_i8_2D_keepdims; -mod reduce_min_i8_2D_axis_1; -mod reduce_min_u32_1D; -mod reduce_min_u32_2D_default; -mod reduce_min_u32_2D_keepdims; -mod reduce_min_u32_2D_axis_1; -mod sequence_construct_fp16x16; -mod sequence_construct_fp8x23; -mod sequence_construct_i32; -mod sequence_construct_i8; -mod sequence_construct_u32; -mod shrink_hard_fp16x16; -mod shrink_soft_fp16x16; -mod shrink_hard_fp8x23; -mod shrink_soft_fp8x23; -mod sequence_empty_fp16x16; -mod sequence_empty_fp8x23; -mod sequence_empty_i32; -mod sequence_empty_i8; -mod sequence_empty_u32; -mod reduce_mean_fp16x16_1D; -mod reduce_mean_fp16x16_2D_default; -mod reduce_mean_fp16x16_2D_keepdims; -mod reduce_mean_fp16x16_2D_axis_1; -mod reduce_mean_fp8x23_1D; -mod reduce_mean_fp8x23_2D_default; -mod reduce_mean_fp8x23_2D_keepdims; -mod reduce_mean_fp8x23_2D_axis_1; -mod reduce_mean_i32_1D; -mod reduce_mean_i32_2D_default; -mod reduce_mean_i32_2D_keepdims; -mod reduce_mean_i32_2D_axis_1; -mod reduce_mean_i8_1D; -mod reduce_mean_i8_2D_default; -mod reduce_mean_i8_2D_keepdims; -mod reduce_mean_i8_2D_axis_1; -mod reduce_mean_u32_1D; -mod reduce_mean_u32_2D_default; -mod reduce_mean_u32_2D_keepdims; -mod reduce_mean_u32_2D_axis_1; -mod pow_fp16x16; -mod pow_fp16x16_broadcast; -mod pow_fp8x23; -mod pow_fp8x23_broadcast; -mod sequence_erase_u32_positive; -mod sequence_erase_u32_negative; -mod sequence_erase_u32_empty; -mod 
sequence_erase_fp16x16_positive; -mod sequence_erase_fp16x16_negative; -mod sequence_erase_fp16x16_empty; -mod sequence_erase_fp8x23_positive; -mod sequence_erase_fp8x23_negative; -mod sequence_erase_fp8x23_empty; -mod sequence_erase_i32_positive; -mod sequence_erase_i32_negative; -mod sequence_erase_i32_empty; -mod sequence_erase_i8_positive; -mod sequence_erase_i8_negative; -mod sequence_erase_i8_empty; -mod sequence_insert_fp16x16; -mod sequence_insert_fp8x23; -mod sequence_insert_i32; -mod sequence_insert_i8; -mod sequence_insert_u32; -mod concat_from_sequence_fp8x23_new_axis_zero; -mod concat_from_sequence_fp8x23_new_axis_one; -mod concat_from_sequence_fp8x23_new_axis_default; -mod concat_from_sequence_fp16x16_new_axis_zero; -mod concat_from_sequence_fp16x16_new_axis_one; -mod concat_from_sequence_fp16x16_new_axis_default; -mod concat_from_sequence_i32_new_axis_zero; -mod concat_from_sequence_i32_new_axis_one; -mod concat_from_sequence_i32_new_axis_default; -mod concat_from_sequence_i8_new_axis_zero; -mod concat_from_sequence_i8_new_axis_one; -mod concat_from_sequence_i8_new_axis_default; -mod concat_from_sequence_u32_new_axis_zero; -mod concat_from_sequence_u32_new_axis_one; -mod concat_from_sequence_u32_new_axis_default; -mod is_nan_fp16x16; -mod is_nan_fp8x23; -mod is_inf_fp16x16; -mod is_inf_fp8x23; -mod is_inf_i32; -mod is_inf_i8; -mod is_inf_u32; -mod is_pos_inf_fp16x16; -mod is_neg_inf_fp16x16; -mod is_pos_inf_fp8x23; -mod is_neg_inf_fp8x23; -mod is_pos_inf_i32; -mod is_neg_inf_i32; -mod is_pos_inf_i8; -mod is_neg_inf_i8; -mod reduce_log_sum_fp8x23_export_do_not_keepdims; -mod reduce_log_sum_fp8x23_export_keepdims; -mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -mod reduce_log_sum_fp16x16_export_do_not_keepdims; -mod reduce_log_sum_fp16x16_export_keepdims; -mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -mod and_bool; -mod erf_fp16x16; -mod erf_fp8x23; -mod unique_fp16x16_without_axis_sorted; -mod 
unique_fp16x16_with_axis_zero_sorted; -mod unique_u32_without_axis_sorted; -mod unique_u32_without_axis_not_sorted; -mod unique_u32_with_axis_zero_sorted; -mod unique_u32_with_axis_zero_not_sorted; -mod unique_u32_with_axis_one_sorted; -mod unique_u32_with_axis_one_not_sorted; -mod gather_nd_fp16x16_3d_default; -mod gather_nd_fp16x16_3d_batch_dims1; -mod gather_nd_fp16x16_3d_batch_dims2; -mod gather_nd_fp8x23_3d_default; -mod gather_nd_fp8x23_3d_batch_dims1; -mod gather_nd_fp8x23_3d_batch_dims2; -mod gather_nd_i32_3d_default; -mod gather_nd_i32_3d_batch_dims1; -mod gather_nd_i32_3d_batch_dims2; -mod gather_nd_i8_3d_default; -mod gather_nd_i8_3d_batch_dims1; -mod gather_nd_u32_default; -mod gather_nd_u32_batch_dims1; -mod gather_nd_u32_batch_dims2; -mod resize_upsample_scales_nearest; -mod resize_downsample_scales_cubic; -mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_downsample_scales_cubic_align_corners; -mod resize_upsample_scales_linear; -mod resize_downsample_scales_linear_align_corners; -mod resize_downsample_scales_nearest; -mod resize_upsample_scales_cubic; -mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -mod resize_upsample_scales_cubic_align_corners; -mod resize_upsample_scales_cubic_asymmetric; -mod resize_upsample_scales_linear_align_corners; -mod resize_upsample_sizes_nearest; -mod resize_upsample_sizes_cubic; -mod resize_downsample_sizes_cubic; -mod resize_downsample_sizes_nearest; -mod resize_upsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_scales_cubic_antialias; -mod resize_downsample_scales_linear_antialias; -mod resize_downsample_sizes_cubic_antialias; -mod resize_downsample_sizes_linear_pytorch_half_pixel; -mod resize_tf_crop_and_resize; -mod resize_tf_crop_and_resize_extrapolation_value; -mod resize_upsample_scales_nearest_axes_2_3; -mod resize_upsample_scales_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_axes_2_3; -mod resize_upsample_sizes_nearest_ceil_half_pixel; -mod 
resize_upsample_sizes_nearest_floor_align_corners; -mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -mod resize_downsample_scales_linear_half_pixel_symmetric; -mod resize_downsample_sizes_nearest_not_larger; -mod resize_downsample_sizes_nearest_not_smaller; -mod resize_tf_crop_and_resize_axes_2_3; -mod resize_tf_crop_and_resize_axes_3_2; -mod resize_upsample_sizes_nearest_axes_3_2; -mod resize_upsample_sizes_nearest_not_larger; -mod resize_upsample_sizes_nearest_not_smaller; -mod compress_fp16x16_3d_default; -mod compress_fp16x16_3d_axis1; -mod compress_fp16x16_3d_axis2; -mod compress_fp16x16_3d_axis3; -mod compress_fp16x16_3d_noaxis; -mod compress_fp8x23_3d_default; -mod compress_fp8x23_3d_axis1; -mod compress_fp8x23_3d_axis2; -mod compress_i32_3d_default; -mod compress_i32_3d_axis1; -mod compress_i32_3d_axis2; -mod compress_i8_3d_default; -mod compress_i8_3d_axis1; -mod compress_i8_3d_axis2; -mod compress_u32_3d_default; -mod compress_u32_3d_axis1; -mod compress_u32_3d_axis2; -mod compress_u32_3d_axis2_2; -mod compress_u32_3d_axis3; -mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -mod reduce_log_sum_exp_fp32x32_export_keepdims; -mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -mod layer_normalization_default_axis; -mod layer_normalization_4d_axis0; -mod layer_normalization_4d_axis1; -mod layer_normalization_4d_axis2; -mod layer_normalization_4d_axis3; -mod layer_normalization_3d_axis0_epsilon; -mod layer_normalization_3d_axis_negative_3_epsilon; -mod layer_normalization_3d_axis1_epsilon; -mod layer_normalization_3d_axis2_epsilon; -mod layer_normalization_4d_axis_negative_4; -mod layer_normalization_4d_axis_negative_3; -mod layer_normalization_4d_axis_negative_2; -mod layer_normalization_4d_axis_negative_1; -mod layer_normalization_3d_axis_negative_2_epsilon; -mod layer_normalization_3d_axis_negative_1_epsilon; -mod layer_normalization_test; -mod split_u32_1d_equal_parts; -mod split_u32_2d_equal_parts; -mod 
split_u32_zero_size; -mod split_u32_1d_variable_parts; -mod split_u32_2d_variable_parts; -mod split_u32_1d_uneven; -mod split_u32_2d_uneven; -mod split_fp16x16_1d_equal_parts; -mod split_fp16x16_1d_variable_parts; -mod split_fp16x16_2d_equal_parts; -mod split_fp16x16_2d_variable_parts; -mod split_fp16x16_zero_size; -mod split_fp16x16_1d_uneven; -mod split_fp16x16_2d_uneven; -mod grid_sample; -mod grid_sample_cubic; -mod grid_sample_aligncorners; -mod grid_sample_nearest; -mod grid_sample_nearest_aligncorner; -mod grid_sample_padding_border; -mod grid_sample_padding_reflection; -mod grid_sample_padding_zeros; -mod col2im; -mod col2im_5D; -mod col2im_dilations; -mod col2im_pads; -mod col2im_strides; -mod random_uniform_like_fp16x16; -mod random_uniform_like_fp8x23; -mod range_fp8x23; -mod range_fp16x16; -mod range_i32; -mod range_i8; -mod range_u32; -mod hann_window_fp8x23; -mod hann_window_fp16x16; -mod hamming_window_fp16x16; -mod hamming_window_fp8x23; -mod blackman_window_fp16x16; -mod blackman_window_fp8x23; -mod split_to_sequence_fp16x16_1d_equal_parts; -mod split_to_sequence_fp16x16_1d_variable_parts; -mod split_to_sequence_fp16x16_2d_equal_parts; -mod split_to_sequence_fp16x16_2d_variable_parts; -mod split_to_sequence_fp16x16_zero_size; -mod split_to_sequence_fp16x16_1d_uneven; -mod split_to_sequence_fp16x16_2d_uneven; -mod split_to_sequence_u32_1d_equal_parts; -mod split_to_sequence_u32_1d_variable_parts; -mod split_to_sequence_u32_2d_equal_parts; -mod split_to_sequence_u32_2d_variable_parts; -mod split_to_sequence_u32_zero_size; -mod split_to_sequence_u32_1d_uneven; -mod split_to_sequence_u32_2d_uneven; -mod split_to_sequence_2d_scalar; -mod split_to_sequence_2d_nokeepdims; -mod split_to_sequence_1d_nokeepdims; -mod reverse_sequence_fp16x16_batch_equal_parts; -mod reverse_sequence_fp16x16_time_equal_parts; -mod reverse_sequence_i32_batch_equal_parts; -mod reverse_sequence_i32_time_equal_parts; -mod reverse_sequence_i8_batch_equal_parts; -mod 
reverse_sequence_i8_time_equal_parts; -mod reverse_sequence_u32_4x4_batch; -mod reverse_sequence_u32_4x4_time; -mod reverse_sequence_u32_3x3_batch; -mod reverse_sequence_u32_3x3_time; -mod reverse_sequence_different_dimensions_4_5; -mod reverse_sequence_different_dimensions_2_4; -mod reverse_sequence_different_dimensions_1_6; -mod reverse_sequence_different_dimensions_3x9_batch; -mod reverse_sequence_different_dimensions_3x9_time; -mod conv_transpose; -mod conv_transpose_1d; -mod conv_transpose_3d; -mod conv_transpose_attributes; -mod conv_transpose_autopad_same; -mod conv_transpose_dilations; -mod conv_transpose_pads; -mod conv_transpose_group_2; -mod conv_transpose_group_2_image_3; -mod depth_to_space_fp16x16; -mod depth_to_space_fp8x23; -mod depth_to_space_i32; -mod depth_to_space_i8; -mod depth_to_space_u32; -mod space_to_depth_fp16x16; -mod space_to_depth_fp8x23; -mod space_to_depth_i32; -mod space_to_depth_i8; -mod space_to_depth_u32; -mod scatter_nd_fp16x16_3d_default; -mod scatter_nd_fp16x16_3d_add; -mod scatter_nd_fp16x16_3d_mul; -mod scatter_nd_fp16x16_3d_max; -mod scatter_nd_fp16x16_3d_min; -mod scatter_nd_fp8x23_3d_default; -mod scatter_nd_fp8x23_3d_add; -mod scatter_nd_fp8x23_3d_mul; -mod scatter_nd_fp8x23_3d_max; -mod scatter_nd_fp8x23_3d_min; -mod scatter_nd_u32_default; -mod scatter_nd_u32_add; -mod scatter_nd_u32_mul; -mod scatter_nd_u32_max; -mod scatter_nd_u32_min; -mod conv_2D_with_padding; -mod conv_1D_no_padding; -mod conv_1D_with_padding; -mod conv_3D_no_padding; -mod conv_3D_with_padding; -mod conv_4D_no_padding; -mod conv_2D_with_2_groups; -mod conv_2D_with_autopad_same; -mod conv_2D_with_strides_asymmetric_padding; -mod conv_2D_with_strides_with_padding; -mod conv_4D_with_padding; -mod label_encoder_fp16x16_3d_default; -mod label_encoder_fp8x23_default; -mod label_encoder_i8_default; -mod label_encoder_i32_default; -mod label_encoder_u32_default; -mod gather_fp16x16_3d_default; -mod gather_fp16x16_3d_axis1; -mod gather_fp16x16_3d_axis2; 
-mod gather_negative_indices; -mod gather_negative_axis; +// mod abs_fp16x16; +// mod abs_fp8x23; +// mod abs_i32; +// mod abs_i8; +// mod acos_fp16x16; +// mod acos_fp8x23; +// mod acosh_fp16x16; +// mod acosh_fp8x23; +// mod add_fp16x16; +// mod add_fp16x16_broadcast; +// mod add_fp8x23; +// mod add_fp8x23_broadcast; +// mod add_i32; +// mod add_i32_broadcast; +// mod add_i8; +// mod add_i8_broadcast; +// mod add_u32; +// mod add_u32_broadcast; +// mod argmin_fp16x16_1D_default; +// mod argmin_fp16x16_1D_keepdims_false; +// mod argmin_fp16x16_1D_last_index; +// mod argmin_fp16x16_2D_default; +// mod argmin_fp16x16_2D_keepdims_false; +// mod argmin_fp16x16_2D_last_index; +// mod argmin_fp16x16_3D_default; +// mod argmin_fp16x16_3D_keepdims_false; +// mod argmin_fp16x16_3D_last_index; +// mod argmin_fp8x23_1D_default; +// mod argmin_fp8x23_1D_keepdims_false; +// mod argmin_fp8x23_1D_last_index; +// mod argmin_fp8x23_2D_default; +// mod argmin_fp8x23_2D_keepdims_false; +// mod argmin_fp8x23_2D_last_index; +// mod argmin_fp8x23_3D_default; +// mod argmin_fp8x23_3D_keepdims_false; +// mod argmin_fp8x23_3D_last_index; +// mod argmin_i32_1D_default; +// mod argmin_i32_1D_keepdims_false; +// mod argmin_i32_1D_last_index; +// mod argmin_i32_2D_default; +// mod argmin_i32_2D_keepdims_false; +// mod argmin_i32_2D_last_index; +// mod argmin_i32_3D_default; +// mod argmin_i32_3D_keepdims_false; +// mod argmin_i32_3D_last_index; +// mod argmin_i8_1D_default; +// mod argmin_i8_1D_keepdims_false; +// mod argmin_i8_1D_last_index; +// mod argmin_i8_2D_default; +// mod argmin_i8_2D_keepdims_false; +// mod argmin_i8_2D_last_index; +// mod argmin_i8_3D_default; +// mod argmin_i8_3D_keepdims_false; +// mod argmin_i8_3D_last_index; +// mod argmin_u32_1D_default; +// mod argmin_u32_1D_keepdims_false; +// mod argmin_u32_1D_last_index; +// mod argmin_u32_2D_default; +// mod argmin_u32_2D_keepdims_false; +// mod argmin_u32_2D_last_index; +// mod argmin_u32_3D_default; +// mod 
argmin_u32_3D_keepdims_false; +// mod argmin_u32_3D_last_index; +// mod asin_fp16x16; +// mod asin_fp8x23; +// mod asinh_fp16x16; +// mod asinh_fp8x23; +// mod atan_fp16x16; +// mod atan_fp8x23; +// mod ceil_fp16x16; +// mod ceil_fp8x23; +// mod concat_fp16x16_1d; +// mod concat_fp16x16_2d; +// mod concat_fp16x16_3d_default; +// mod concat_fp16x16_3d_axis_1; +// mod concat_fp16x16_3d_axis_2; +// mod concat_fp16x16_3d_three_tensors_axis_1; +// mod concat_fp16x16_3d_three_tensors_axis_2; +// mod concat_fp8x23_1d; +// mod concat_fp8x23_2d; +// mod concat_fp8x23_3d_default; +// mod concat_fp8x23_3d_axis_1; +// mod concat_fp8x23_3d_axis_2; +// mod concat_fp8x23_3d_three_tensors_axis_1; +// mod concat_fp8x23_3d_three_tensors_axis_2; +// mod concat_i32_1d; +// mod concat_i32_2d; +// mod concat_i32_3d_default; +// mod concat_i32_3d_axis_1; +// mod concat_i32_3d_axis_2; +// mod concat_i32_3d_three_tensors_axis_1; +// mod concat_i32_3d_three_tensors_axis_2; +// mod concat_i8_1d; +// mod concat_i8_2d; +// mod concat_i8_3d_default; +// mod concat_i8_3d_axis_1; +// mod concat_i8_3d_axis_2; +// mod concat_i8_3d_three_tensors_axis_1; +// mod concat_i8_3d_three_tensors_axis_2; +// mod concat_u32_1d; +// mod concat_u32_2d; +// mod concat_u32_3d_default; +// mod concat_u32_3d_axis_1; +// mod concat_u32_3d_axis_2; +// mod concat_u32_3d_three_tensors_axis_1; +// mod concat_u32_3d_three_tensors_axis_2; +// mod cos_fp16x16; +// mod cos_fp8x23; +// mod cosh_fp16x16; +// mod cosh_fp8x23; +// mod cumsum_fp16x16_1d_default; +// mod cumsum_fp16x16_1d_exclusive; +// mod cumsum_fp16x16_1d_reverse; +// mod cumsum_fp16x16_1d_reverse_exclusive; +// mod cumsum_fp16x16_2d_axis_0; +// mod cumsum_fp16x16_2d_axis_1; +// mod cumsum_fp8x23_1d_default; +// mod cumsum_fp8x23_1d_exclusive; +// mod cumsum_fp8x23_1d_reverse; +// mod cumsum_fp8x23_1d_reverse_exclusive; +// mod cumsum_fp8x23_2d_axis_0; +// mod cumsum_fp8x23_2d_axis_1; +// mod cumsum_i32_1d_default; +// mod cumsum_i32_1d_exclusive; +// mod 
cumsum_i32_1d_reverse; +// mod cumsum_i32_1d_reverse_exclusive; +// mod cumsum_i32_2d_axis_0; +// mod cumsum_i32_2d_axis_1; +// mod cumsum_i8_1d_default; +// mod cumsum_i8_1d_exclusive; +// mod cumsum_i8_1d_reverse; +// mod cumsum_i8_1d_reverse_exclusive; +// mod cumsum_i8_2d_axis_0; +// mod cumsum_i8_2d_axis_1; +// mod cumsum_u32_1d_default; +// mod cumsum_u32_1d_exclusive; +// mod cumsum_u32_1d_reverse; +// mod cumsum_u32_1d_reverse_exclusive; +// mod cumsum_u32_2d_axis_0; +// mod cumsum_u32_2d_axis_1; +// mod div_fp16x16; +// mod div_fp16x16_broadcast; +// mod div_fp8x23; +// mod div_fp8x23_broadcast; +// mod div_i32; +// mod div_i32_broadcast; +// mod div_i8; +// mod div_i8_broadcast; +// mod div_u32; +// mod div_u32_broadcast; +// mod equal_fp16x16; +// mod equal_fp16x16_broadcast; +// mod equal_fp8x23; +// mod equal_fp8x23_broadcast; +// mod equal_i32; +// mod equal_i32_broadcast; +// mod equal_i8; +// mod equal_i8_broadcast; +// mod equal_u32; +// mod equal_u32_broadcast; +// mod exp_fp16x16; +// mod exp_fp8x23; +// mod less_equal_fp16x16; +// mod less_equal_fp16x16_broadcast; +// mod less_equal_fp8x23; +// mod less_equal_fp8x23_broadcast; +// mod less_equal_i32; +// mod less_equal_i32_broadcast; +// mod less_equal_i8; +// mod less_equal_i8_broadcast; +// mod less_equal_u32; +// mod less_equal_u32_broadcast; +// mod greater_fp16x16; +// mod greater_fp16x16_broadcast; +// mod greater_fp8x23; +// mod greater_fp8x23_broadcast; +// mod greater_i32; +// mod greater_i32_broadcast; +// mod greater_i8; +// mod greater_i8_broadcast; +// mod greater_u32; +// mod greater_u32_broadcast; +// mod leaky_relu_fp16x16; +// mod leaky_relu_fp8x23; +// mod linear_fp16x16; +// mod linear_fp8x23; +// mod linear_i32; +// mod linear_i8; +// mod linear_u32; +// mod log_fp16x16; +// mod log_fp8x23; +// mod logsoftmax_fp16x16_axis_0; +// mod logsoftmax_fp16x16_axis_1; +// mod logsoftmax_fp8x23_axis_0; +// mod logsoftmax_fp8x23_axis_1; +// mod matmul_fp16x16_1d; +// mod 
matmul_fp16x16_2x2; +// mod matmul_fp16x16_2x1; +// mod matmul_fp16x16_1x2; +// mod matmul_fp8x23_1d; +// mod matmul_fp8x23_2x2; +// mod matmul_fp8x23_2x1; +// mod matmul_fp8x23_1x2; +// mod matmul_i32_1d; +// mod matmul_i32_2x2; +// mod matmul_i32_2x1; +// mod matmul_i32_1x2; +// mod matmul_i8_1d; +// mod matmul_i8_2x2; +// mod matmul_i8_2x1; +// mod matmul_i8_1x2; +// mod matmul_u32_1d; +// mod matmul_u32_2x2; +// mod matmul_u32_2x1; +// mod matmul_u32_1x2; +// mod mul_fp16x16; +// mod mul_fp16x16_broadcast; +// mod mul_fp8x23; +// mod mul_fp8x23_broadcast; +// mod mul_i32; +// mod mul_i32_broadcast; +// mod mul_i8; +// mod mul_i8_broadcast; +// mod mul_u32; +// mod mul_u32_broadcast; +// mod or_fp16x16; +// mod or_fp16x16_broadcast; +// mod or_fp8x23; +// mod or_fp8x23_broadcast; +// mod or_i32; +// mod or_i32_broadcast; +// mod or_i8; +// mod or_i8_broadcast; +// mod or_u32; +// mod or_u32_broadcast; +// mod relu_fp16x16; +// mod relu_fp8x23; +// mod relu_i32; +// mod relu_i8; +// mod sigmoid_fp16x16; +// mod sigmoid_fp8x23; +// mod sin_fp16x16; +// mod sin_fp8x23; +// mod sinh_fp16x16; +// mod sinh_fp8x23; +// mod softplus_fp8x23; +// mod softplus_fp16x16; +// mod softsign_fp8x23; +// mod softsign_fp16x16; +// mod sqrt_fp16x16; +// mod sqrt_fp8x23; +// mod sub_fp16x16; +// mod sub_fp16x16_broadcast; +// mod sub_fp8x23; +// mod sub_fp8x23_broadcast; +// mod sub_i32; +// mod sub_i32_broadcast; +// mod sub_i8; +// mod sub_i8_broadcast; +// mod sub_u32; +// mod sub_u32_broadcast; +// mod tanh_fp16x16; +// mod tanh_fp8x23; +// mod transpose_fp16x16_2d; +// mod transpose_fp16x16_3d; +// mod transpose_fp8x23_2d; +// mod transpose_fp8x23_3d; +// mod transpose_i32_2d; +// mod transpose_i32_3d; +// mod transpose_i8_2d; +// mod transpose_i8_3d; +// mod transpose_u32_2d; +// mod transpose_u32_3d; +// mod xor_fp16x16; +// mod xor_fp16x16_broadcast; +// mod xor_fp8x23; +// mod xor_fp8x23_broadcast; +// mod xor_i32; +// mod xor_i32_broadcast; +// mod xor_i8; +// mod 
xor_i8_broadcast; +// mod xor_u32; +// mod xor_u32_broadcast; +// mod greater_equal_fp16x16; +// mod greater_equal_fp16x16_broadcast; +// mod greater_equal_fp8x23; +// mod greater_equal_fp8x23_broadcast; +// mod greater_equal_i32; +// mod greater_equal_i32_broadcast; +// mod greater_equal_i8; +// mod greater_equal_i8_broadcast; +// mod greater_equal_u32; +// mod greater_equal_u32_broadcast; +// mod slice_fp16x16_2d; +// mod slice_fp16x16_3d; +// mod slice_fp8x23_2d; +// mod slice_fp8x23_3d; +// mod slice_i32_2d; +// mod slice_i32_3d; +// mod slice_i8_2d; +// mod slice_i8_3d; +// mod slice_u32_2d; +// mod slice_u32_3d; +// mod nonzero_fp16x16_2d; +// mod nonzero_fp16x16_3d; +// mod nonzero_fp8x23_2d; +// mod nonzero_fp8x23_3d; +// mod nonzero_i32_2d; +// mod nonzero_i32_3d; +// mod nonzero_i8_2d; +// mod nonzero_i8_3d; +// mod nonzero_u32_2d; +// mod nonzero_u32_3d; +// mod squeeze_fP16x16; +// mod squeeze_fP8x23; +// mod squeeze_i32; +// mod squeeze_i8; +// mod squeeze_u32; +// mod unsqueeze_fp16x16_2d; +// mod unsqueeze_fp16x16_3d; +// mod unsqueeze_fp8x23_2d; +// mod unsqueeze_fp8x23_3d; +// mod unsqueeze_i32_2d; +// mod unsqueeze_i32_3d; +// mod unsqueeze_i8_2d; +// mod unsqueeze_i8_3d; +// mod unsqueeze_u32_2d; +// mod unsqueeze_u32_3d; +// mod sign_fP16x16; +// mod sign_fP8x23; +// mod sign_fail; +// mod sign_i32; +// mod sign_i8; +// mod clip_fp16x16_2d; +// mod clip_fp16x16_3d; +// mod clip_fp8x23_2d; +// mod clip_fp8x23_3d; +// mod clip_i32_2d; +// mod clip_i32_3d; +// mod clip_i8_2d; +// mod clip_i8_3d; +// mod clip_u32_2d; +// mod clip_u32_3d; +// mod identity_fP16x16; +// mod identity_fP8x23; +// mod identity_i32; +// mod identity_i8; +// mod identity_u32; +// mod thresholded_relu_fp16x16; +// mod thresholded_relu_fp8x23; +// mod hard_sigmoid_fp8x23; +// mod hard_sigmoid_fp16x16; +// mod neg_fp16x16; +// mod neg_fp8x23; +// mod neg_i32; +// mod neg_i8; +// mod gemm_all_attributes; +// mod gemm_alpha; +// mod gemm_beta; +// mod gemm_default_matrix_bias; 
+// mod gemm_default_vector_bias; +// mod gemm_default_no_bias; +// mod gemm_transposeA; +// mod gemm_transposeB; +// mod min_fp16x16_three_tensors; +// mod min_fp16x16_broadcast_three_tensors; +// mod min_fp16x16_two_tensors; +// mod min_fp16x16_broadcast_two_tensors; +// mod min_fp8x23_three_tensors; +// mod min_fp8x23_broadcast_three_tensors; +// mod min_fp8x23_two_tensors; +// mod min_fp8x23_broadcast_two_tensors; +// mod min_i32_three_tensors; +// mod min_i32_broadcast_three_tensors; +// mod min_i32_two_tensors; +// mod min_i32_broadcast_two_tensors; +// mod min_i8_three_tensors; +// mod min_i8_broadcast_three_tensors; +// mod min_i8_two_tensors; +// mod min_i8_broadcast_two_tensors; +// mod min_u32_three_tensors; +// mod min_u32_broadcast_three_tensors; +// mod min_u32_two_tensors; +// mod min_u32_broadcast_two_tensors; +// mod where_fp16x16; +// mod where_fp16x16_broadcast; +// mod where_fp8x23; +// mod where_fp8x23_broadcast; +// mod where_i32; +// mod where_i32_broadcast; +// mod where_i8; +// mod where_i8_broadcast; +// mod where_u32; +// mod where_u32_broadcast; +// mod not_bool; +// mod round_fp16x16; +// mod round_fp8x23; +// mod max_fp16x16_three_tensors; +// mod max_fp16x16_broadcast_three_tensors; +// mod max_fp16x16_two_tensors; +// mod max_fp16x16_broadcast_two_tensors; +// mod max_fp8x23_three_tensors; +// mod max_fp8x23_broadcast_three_tensors; +// mod max_fp8x23_two_tensors; +// mod max_fp8x23_broadcast_two_tensors; +// mod max_i32_three_tensors; +// mod max_i32_broadcast_three_tensors; +// mod max_i32_two_tensors; +// mod max_i32_broadcast_two_tensors; +// mod max_i8_three_tensors; +// mod max_i8_broadcast_three_tensors; +// mod max_i8_two_tensors; +// mod max_i8_broadcast_two_tensors; +// mod max_u32_three_tensors; +// mod max_u32_broadcast_three_tensors; +// mod max_u32_two_tensors; +// mod max_u32_broadcast_two_tensors; +// mod scatter_fp16x16_3d_default; +// mod scatter_fp16x16_3d_axis1; +// mod scatter_fp16x16_3d_axis1_add; +// mod 
scatter_fp8x23_default; +// mod scatter_fp8x23_axis1; +// mod scatter_fp8x23_mul; +// mod scatter_i8_default; +// mod scatter_i8_axis1; +// mod scatter_i8_axis1_max; +// mod scatter_u32_default; +// mod scatter_u32_axis1; +// mod scatter_u32_add; +// mod array_feature_extractor_1D_i32; +// mod array_feature_extractor_1D_fp8x23; +// mod array_feature_extractor_1D_fp16x16; +// mod array_feature_extractor_2D_i32; +// mod array_feature_extractor_2D_fp8x23; +// mod array_feature_extractor_2D_fp16x16; +// mod array_feature_extractor_3D_i32; +// mod array_feature_extractor_3D_fp8x23; +// mod array_feature_extractor_3D_fp16x16; +// mod binarizer_fp16x16; +// mod binarizer_fp8x23; +// mod tril_fp16x16; +// mod tril_fp16x16_neg; +// mod tril_fp16x16_one_row; +// mod tril_fp16x16_out_neg; +// mod tril_fp16x16_out_pos; +// mod tril_fp16x16_pos; +// mod tril_fp16x16_square; +// mod tril_fp16x16_square_neg; +// mod tril_fp16x16_zero; +// mod triu_fp16x16; +// mod triu_fp16x16_neg; +// mod triu_fp16x16_one_row; +// mod triu_fp16x16_out_neg; +// mod triu_fp16x16_out_pos; +// mod triu_fp16x16_pos; +// mod triu_fp16x16_square; +// mod triu_fp16x16_square_neg; +// mod triu_fp16x16_zero; +// mod tril_fp8x23; +// mod tril_fp8x23_neg; +// mod tril_fp8x23_one_row; +// mod tril_fp8x23_out_neg; +// mod tril_fp8x23_out_pos; +// mod tril_fp8x23_pos; +// mod tril_fp8x23_square; +// mod tril_fp8x23_square_neg; +// mod tril_fp8x23_zero; +// mod triu_fp8x23; +// mod triu_fp8x23_neg; +// mod triu_fp8x23_one_row; +// mod triu_fp8x23_out_neg; +// mod triu_fp8x23_out_pos; +// mod triu_fp8x23_pos; +// mod triu_fp8x23_square; +// mod triu_fp8x23_square_neg; +// mod triu_fp8x23_zero; +// mod tril_i32; +// mod tril_neg_i32; +// mod tril_i32_one_row; +// mod tril_i32_out_neg; +// mod tril_i32_out_pos; +// mod tril_i32_pos; +// mod tril_i32_square; +// mod tril_i32_square_neg; +// mod tril_i32_zero; +// mod triu_i32; +// mod triu_i32_neg; +// mod triu_i32_one_row; +// mod triu_i32_out_neg; +// mod 
triu_i32_out_pos; +// mod triu_i32_pos; +// mod triu_i32_square; +// mod triu_i32_square_neg; +// mod triu_i32_zero; +// mod tril_i8; +// mod tril_i8_neg; +// mod tril_i8_one_row; +// mod tril_i8_out_neg; +// mod tril_i8_out_pos; +// mod tril_i8_pos; +// mod tril_i8_square; +// mod tril_i8_square_neg; +// mod tril_i8_zero; +// mod triu_i8; +// mod triu_i8_neg; +// mod triu_i8_one_row; +// mod triu_i8_out_neg; +// mod triu_i8_out_pos; +// mod triu_i8_pos; +// mod triu_i8_square; +// mod triu_i8_square_neg; +// mod triu_i8_zero; +// mod tril_u32; +// mod tril_u32_neg; +// mod tril_u32_one_row; +// mod tril_u32_out_neg; +// mod tril_u32_out_pos; +// mod tril_u32_pos; +// mod tril_u32_square; +// mod tril_u32_square_neg; +// mod tril_u32_zero; +// mod triu_u32; +// mod triu_u32_neg; +// mod triu_u32_one_row; +// mod triu_u32_out_neg; +// mod triu_u32_out_pos; +// mod triu_u32_pos; +// mod triu_u32_square; +// mod triu_u32_square_neg; +// mod triu_u32_zero; +// mod reduce_sum_square_fp16x16_export_do_not_keepdims; +// mod reduce_sum_square_fp16x16_export_keepdims; +// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +// mod reduce_sum_square_fp8x23_export_do_not_keepdims; +// mod reduce_sum_square_fp8x23_export_keepdims; +// mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +// mod reduce_sum_square_i32_export_do_not_keepdims; +// mod reduce_sum_square_i32_export_keepdims; +// mod reduce_sum_square_i32_export_negative_axes_keepdims; +// mod reduce_sum_square_i8_export_do_not_keepdims; +// mod reduce_sum_square_i8_export_keepdims; +// mod reduce_sum_square_i8_export_negative_axes_keepdims; +// mod reduce_sum_square_u32_export_do_not_keepdims; +// mod reduce_sum_square_u32_export_keepdims; +// mod reduce_sum_square_u32_export_negative_axes_keepdims; +// mod reduce_l2_fp16x16_export_do_not_keepdims; +// mod reduce_l2_fp16x16_export_keepdims; +// mod reduce_l2_fp16x16_export_negative_axes_keepdims; +// mod reduce_l2_fp8x23_export_do_not_keepdims; +// 
mod reduce_l2_fp8x23_export_keepdims; +// mod reduce_l2_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_fp16x16_export_do_not_keepdims; +// mod reduce_l1_fp16x16_export_keepdims; +// mod reduce_l1_fp16x16_export_negative_axes_keepdims; +// mod reduce_l1_fp8x23_export_do_not_keepdims; +// mod reduce_l1_fp8x23_export_keepdims; +// mod reduce_l1_fp8x23_export_negative_axes_keepdims; +// mod reduce_l1_i32_export_do_not_keepdims; +// mod reduce_l1_i32_export_keepdims; +// mod reduce_l1_i32_export_negative_axes_keepdims; +// mod reduce_l1_i8_export_do_not_keepdims; +// mod reduce_l1_i8_export_keepdims; +// mod reduce_l1_i8_export_negative_axes_keepdims; +// mod reduce_l1_u32_export_do_not_keepdims; +// mod reduce_l1_u32_export_keepdims; +// mod reduce_l1_u32_export_negative_axes_keepdims; +// mod reduce_prod_fp16x16_1D; +// mod reduce_prod_fp16x16_2D_default; +// mod reduce_prod_fp16x16_2D_keepdims; +// mod reduce_prod_fp16x16_2D_axis_1; +// mod reduce_prod_fp8x23_1D; +// mod reduce_prod_fp8x23_2D_default; +// mod reduce_prod_fp8x23_2D_keepdims; +// mod reduce_prod_fp8x23_2D_axis_1; +// mod reduce_prod_i32_1D; +// mod reduce_prod_i32_2D_default; +// mod reduce_prod_i32_2D_keepdims; +// mod reduce_prod_i32_2D_axis_1; +// mod reduce_prod_i8_1D; +// mod reduce_prod_i8_2D_default; +// mod reduce_prod_i8_2D_keepdims; +// mod reduce_prod_i8_2D_axis_1; +// mod reduce_prod_u32_1D; +// mod reduce_prod_u32_2D_default; +// mod reduce_prod_u32_2D_keepdims; +// mod reduce_prod_u32_2D_axis_1; +// mod sequence_length_fp16x16; +// mod sequence_length_fp16x16_broadcast; +// mod sequence_length_fp8x23; +// mod sequence_length_fp8x23_broadcast; +// mod sequence_length_i32; +// mod sequence_length_i32_broadcast; +// mod sequence_length_i8; +// mod sequence_length_i8_broadcast; +// mod sequence_length_u32; +// mod sequence_length_u32_broadcast; +// mod sequence_at_u32_positive; +// mod sequence_at_u32_negative; +// mod sequence_at_fp16x16_positive; +// mod 
sequence_at_fp16x16_negative; +// mod sequence_at_fp8x23_positive; +// mod sequence_at_fp8x23_negative; +// mod sequence_at_i32_positive; +// mod sequence_at_i32_negative; +// mod sequence_at_i8_positive; +// mod sequence_at_i8_negative; +// mod reduce_min_fp16x16_1D; +// mod reduce_min_fp16x16_2D_default; +// mod reduce_min_fp16x16_2D_keepdims; +// mod reduce_min_fp16x16_2D_axis_1; +// mod reduce_min_fp8x23_1D; +// mod reduce_min_fp8x23_2D_default; +// mod reduce_min_fp8x23_2D_keepdims; +// mod reduce_min_fp8x23_2D_axis_1; +// mod reduce_min_i32_1D; +// mod reduce_min_i32_2D_default; +// mod reduce_min_i32_2D_keepdims; +// mod reduce_min_i32_2D_axis_1; +// mod reduce_min_i8_1D; +// mod reduce_min_i8_2D_default; +// mod reduce_min_i8_2D_keepdims; +// mod reduce_min_i8_2D_axis_1; +// mod reduce_min_u32_1D; +// mod reduce_min_u32_2D_default; +// mod reduce_min_u32_2D_keepdims; +// mod reduce_min_u32_2D_axis_1; +// mod sequence_construct_fp16x16; +// mod sequence_construct_fp8x23; +// mod sequence_construct_i32; +// mod sequence_construct_i8; +// mod sequence_construct_u32; +// mod shrink_hard_fp16x16; +// mod shrink_soft_fp16x16; +// mod shrink_hard_fp8x23; +// mod shrink_soft_fp8x23; +// mod sequence_empty_fp16x16; +// mod sequence_empty_fp8x23; +// mod sequence_empty_i32; +// mod sequence_empty_i8; +// mod sequence_empty_u32; +// mod reduce_mean_fp16x16_1D; +// mod reduce_mean_fp16x16_2D_default; +// mod reduce_mean_fp16x16_2D_keepdims; +// mod reduce_mean_fp16x16_2D_axis_1; +// mod reduce_mean_fp8x23_1D; +// mod reduce_mean_fp8x23_2D_default; +// mod reduce_mean_fp8x23_2D_keepdims; +// mod reduce_mean_fp8x23_2D_axis_1; +// mod reduce_mean_i32_1D; +// mod reduce_mean_i32_2D_default; +// mod reduce_mean_i32_2D_keepdims; +// mod reduce_mean_i32_2D_axis_1; +// mod reduce_mean_i8_1D; +// mod reduce_mean_i8_2D_default; +// mod reduce_mean_i8_2D_keepdims; +// mod reduce_mean_i8_2D_axis_1; +// mod reduce_mean_u32_1D; +// mod reduce_mean_u32_2D_default; +// mod 
reduce_mean_u32_2D_keepdims; +// mod reduce_mean_u32_2D_axis_1; +// mod pow_fp16x16; +// mod pow_fp16x16_broadcast; +// mod pow_fp8x23; +// mod pow_fp8x23_broadcast; +// mod sequence_erase_u32_positive; +// mod sequence_erase_u32_negative; +// mod sequence_erase_u32_empty; +// mod sequence_erase_fp16x16_positive; +// mod sequence_erase_fp16x16_negative; +// mod sequence_erase_fp16x16_empty; +// mod sequence_erase_fp8x23_positive; +// mod sequence_erase_fp8x23_negative; +// mod sequence_erase_fp8x23_empty; +// mod sequence_erase_i32_positive; +// mod sequence_erase_i32_negative; +// mod sequence_erase_i32_empty; +// mod sequence_erase_i8_positive; +// mod sequence_erase_i8_negative; +// mod sequence_erase_i8_empty; +// mod sequence_insert_fp16x16; +// mod sequence_insert_fp8x23; +// mod sequence_insert_i32; +// mod sequence_insert_i8; +// mod sequence_insert_u32; +// mod concat_from_sequence_fp8x23_new_axis_zero; +// mod concat_from_sequence_fp8x23_new_axis_one; +// mod concat_from_sequence_fp8x23_new_axis_default; +// mod concat_from_sequence_fp16x16_new_axis_zero; +// mod concat_from_sequence_fp16x16_new_axis_one; +// mod concat_from_sequence_fp16x16_new_axis_default; +// mod concat_from_sequence_i32_new_axis_zero; +// mod concat_from_sequence_i32_new_axis_one; +// mod concat_from_sequence_i32_new_axis_default; +// mod concat_from_sequence_i8_new_axis_zero; +// mod concat_from_sequence_i8_new_axis_one; +// mod concat_from_sequence_i8_new_axis_default; +// mod concat_from_sequence_u32_new_axis_zero; +// mod concat_from_sequence_u32_new_axis_one; +// mod concat_from_sequence_u32_new_axis_default; +// mod is_nan_fp16x16; +// mod is_nan_fp8x23; +// mod is_inf_fp16x16; +// mod is_inf_fp8x23; +// mod is_inf_i32; +// mod is_inf_i8; +// mod is_inf_u32; +// mod is_pos_inf_fp16x16; +// mod is_neg_inf_fp16x16; +// mod is_pos_inf_fp8x23; +// mod is_neg_inf_fp8x23; +// mod is_pos_inf_i32; +// mod is_neg_inf_i32; +// mod is_pos_inf_i8; +// mod is_neg_inf_i8; +// mod 
reduce_log_sum_fp8x23_export_do_not_keepdims; +// mod reduce_log_sum_fp8x23_export_keepdims; +// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +// mod reduce_log_sum_fp16x16_export_do_not_keepdims; +// mod reduce_log_sum_fp16x16_export_keepdims; +// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +// mod and_bool; +// mod erf_fp16x16; +// mod erf_fp8x23; +// mod unique_fp16x16_without_axis_sorted; +// mod unique_fp16x16_with_axis_zero_sorted; +// mod unique_u32_without_axis_sorted; +// mod unique_u32_without_axis_not_sorted; +// mod unique_u32_with_axis_zero_sorted; +// mod unique_u32_with_axis_zero_not_sorted; +// mod unique_u32_with_axis_one_sorted; +// mod unique_u32_with_axis_one_not_sorted; +// mod gather_nd_fp16x16_3d_default; +// mod gather_nd_fp16x16_3d_batch_dims1; +// mod gather_nd_fp16x16_3d_batch_dims2; +// mod gather_nd_fp8x23_3d_default; +// mod gather_nd_fp8x23_3d_batch_dims1; +// mod gather_nd_fp8x23_3d_batch_dims2; +// mod gather_nd_i32_3d_default; +// mod gather_nd_i32_3d_batch_dims1; +// mod gather_nd_i32_3d_batch_dims2; +// mod gather_nd_i8_3d_default; +// mod gather_nd_i8_3d_batch_dims1; +// mod gather_nd_u32_default; +// mod gather_nd_u32_batch_dims1; +// mod gather_nd_u32_batch_dims2; +// mod resize_upsample_scales_nearest; +// mod resize_downsample_scales_cubic; +// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_downsample_scales_cubic_align_corners; +// mod resize_upsample_scales_linear; +// mod resize_downsample_scales_linear_align_corners; +// mod resize_downsample_scales_nearest; +// mod resize_upsample_scales_cubic; +// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +// mod resize_upsample_scales_cubic_align_corners; +// mod resize_upsample_scales_cubic_asymmetric; +// mod resize_upsample_scales_linear_align_corners; +// mod resize_upsample_sizes_nearest; +// mod resize_upsample_sizes_cubic; +// mod resize_downsample_sizes_cubic; +// mod resize_downsample_sizes_nearest; +// mod 
resize_upsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_scales_cubic_antialias; +// mod resize_downsample_scales_linear_antialias; +// mod resize_downsample_sizes_cubic_antialias; +// mod resize_downsample_sizes_linear_pytorch_half_pixel; +// mod resize_tf_crop_and_resize; +// mod resize_tf_crop_and_resize_extrapolation_value; +// mod resize_upsample_scales_nearest_axes_2_3; +// mod resize_upsample_scales_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_2_3; +// mod resize_upsample_sizes_nearest_ceil_half_pixel; +// mod resize_upsample_sizes_nearest_floor_align_corners; +// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +// mod resize_downsample_scales_linear_half_pixel_symmetric; +// mod resize_downsample_sizes_nearest_not_larger; +// mod resize_downsample_sizes_nearest_not_smaller; +// mod resize_tf_crop_and_resize_axes_2_3; +// mod resize_tf_crop_and_resize_axes_3_2; +// mod resize_upsample_sizes_nearest_axes_3_2; +// mod resize_upsample_sizes_nearest_not_larger; +// mod resize_upsample_sizes_nearest_not_smaller; +// mod compress_fp16x16_3d_default; +// mod compress_fp16x16_3d_axis1; +// mod compress_fp16x16_3d_axis2; +// mod compress_fp16x16_3d_axis3; +// mod compress_fp16x16_3d_noaxis; +// mod compress_fp8x23_3d_default; +// mod compress_fp8x23_3d_axis1; +// mod compress_fp8x23_3d_axis2; +// mod compress_i32_3d_default; +// mod compress_i32_3d_axis1; +// mod compress_i32_3d_axis2; +// mod compress_i8_3d_default; +// mod compress_i8_3d_axis1; +// mod compress_i8_3d_axis2; +// mod compress_u32_3d_default; +// mod compress_u32_3d_axis1; +// mod compress_u32_3d_axis2; +// mod compress_u32_3d_axis2_2; +// mod compress_u32_3d_axis3; +// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_keepdims; +// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +// mod layer_normalization_default_axis; +// mod layer_normalization_4d_axis0; +// mod 
layer_normalization_4d_axis1; +// mod layer_normalization_4d_axis2; +// mod layer_normalization_4d_axis3; +// mod layer_normalization_3d_axis0_epsilon; +// mod layer_normalization_3d_axis_negative_3_epsilon; +// mod layer_normalization_3d_axis1_epsilon; +// mod layer_normalization_3d_axis2_epsilon; +// mod layer_normalization_4d_axis_negative_4; +// mod layer_normalization_4d_axis_negative_3; +// mod layer_normalization_4d_axis_negative_2; +// mod layer_normalization_4d_axis_negative_1; +// mod layer_normalization_3d_axis_negative_2_epsilon; +// mod layer_normalization_3d_axis_negative_1_epsilon; +// mod layer_normalization_test; +// mod split_u32_1d_equal_parts; +// mod split_u32_2d_equal_parts; +// mod split_u32_zero_size; +// mod split_u32_1d_variable_parts; +// mod split_u32_2d_variable_parts; +// mod split_u32_1d_uneven; +// mod split_u32_2d_uneven; +// mod split_fp16x16_1d_equal_parts; +// mod split_fp16x16_1d_variable_parts; +// mod split_fp16x16_2d_equal_parts; +// mod split_fp16x16_2d_variable_parts; +// mod split_fp16x16_zero_size; +// mod split_fp16x16_1d_uneven; +// mod split_fp16x16_2d_uneven; +// mod grid_sample; +// mod grid_sample_cubic; +// mod grid_sample_aligncorners; +// mod grid_sample_nearest; +// mod grid_sample_nearest_aligncorner; +// mod grid_sample_padding_border; +// mod grid_sample_padding_reflection; +// mod grid_sample_padding_zeros; +// mod col2im; +// mod col2im_5D; +// mod col2im_dilations; +// mod col2im_pads; +// mod col2im_strides; +// mod random_uniform_like_fp16x16; +// mod random_uniform_like_fp8x23; +// mod range_fp8x23; +// mod range_fp16x16; +// mod range_i32; +// mod range_i8; +// mod range_u32; +// mod hann_window_fp8x23; +// mod hann_window_fp16x16; +// mod hamming_window_fp16x16; +// mod hamming_window_fp8x23; +// mod blackman_window_fp16x16; +// mod blackman_window_fp8x23; +// mod split_to_sequence_fp16x16_1d_equal_parts; +// mod split_to_sequence_fp16x16_1d_variable_parts; +// mod 
split_to_sequence_fp16x16_2d_equal_parts; +// mod split_to_sequence_fp16x16_2d_variable_parts; +// mod split_to_sequence_fp16x16_zero_size; +// mod split_to_sequence_fp16x16_1d_uneven; +// mod split_to_sequence_fp16x16_2d_uneven; +// mod split_to_sequence_u32_1d_equal_parts; +// mod split_to_sequence_u32_1d_variable_parts; +// mod split_to_sequence_u32_2d_equal_parts; +// mod split_to_sequence_u32_2d_variable_parts; +// mod split_to_sequence_u32_zero_size; +// mod split_to_sequence_u32_1d_uneven; +// mod split_to_sequence_u32_2d_uneven; +// mod split_to_sequence_2d_scalar; +// mod split_to_sequence_2d_nokeepdims; +// mod split_to_sequence_1d_nokeepdims; +// mod reverse_sequence_fp16x16_batch_equal_parts; +// mod reverse_sequence_fp16x16_time_equal_parts; +// mod reverse_sequence_i32_batch_equal_parts; +// mod reverse_sequence_i32_time_equal_parts; +// mod reverse_sequence_i8_batch_equal_parts; +// mod reverse_sequence_i8_time_equal_parts; +// mod reverse_sequence_u32_4x4_batch; +// mod reverse_sequence_u32_4x4_time; +// mod reverse_sequence_u32_3x3_batch; +// mod reverse_sequence_u32_3x3_time; +// mod reverse_sequence_different_dimensions_4_5; +// mod reverse_sequence_different_dimensions_2_4; +// mod reverse_sequence_different_dimensions_1_6; +// mod reverse_sequence_different_dimensions_3x9_batch; +// mod reverse_sequence_different_dimensions_3x9_time; +// mod conv_transpose; +// mod conv_transpose_1d; +// mod conv_transpose_3d; +// mod conv_transpose_attributes; +// mod conv_transpose_autopad_same; +// mod conv_transpose_dilations; +// mod conv_transpose_pads; +// mod conv_transpose_group_2; +// mod conv_transpose_group_2_image_3; +// mod depth_to_space_fp16x16; +// mod depth_to_space_fp8x23; +// mod depth_to_space_i32; +// mod depth_to_space_i8; +// mod depth_to_space_u32; +// mod space_to_depth_fp16x16; +// mod space_to_depth_fp8x23; +// mod space_to_depth_i32; +// mod space_to_depth_i8; +// mod space_to_depth_u32; +// mod scatter_nd_fp16x16_3d_default; +// 
mod scatter_nd_fp16x16_3d_add; +// mod scatter_nd_fp16x16_3d_mul; +// mod scatter_nd_fp16x16_3d_max; +// mod scatter_nd_fp16x16_3d_min; +// mod scatter_nd_fp8x23_3d_default; +// mod scatter_nd_fp8x23_3d_add; +// mod scatter_nd_fp8x23_3d_mul; +// mod scatter_nd_fp8x23_3d_max; +// mod scatter_nd_fp8x23_3d_min; +// mod scatter_nd_u32_default; +// mod scatter_nd_u32_add; +// mod scatter_nd_u32_mul; +// mod scatter_nd_u32_max; +// mod scatter_nd_u32_min; +// mod conv_2D_with_padding; +// mod conv_1D_no_padding; +// mod conv_1D_with_padding; +// mod conv_3D_no_padding; +// mod conv_3D_with_padding; +// mod conv_4D_no_padding; +// mod conv_2D_with_2_groups; +// mod conv_2D_with_autopad_same; +// mod conv_2D_with_strides_asymmetric_padding; +// mod conv_2D_with_strides_with_padding; +// mod conv_4D_with_padding; +// mod label_encoder_fp16x16_3d_default; +// mod label_encoder_fp8x23_default; +// mod label_encoder_i8_default; +// mod label_encoder_i32_default; +// mod label_encoder_u32_default; +// mod gather_fp16x16_3d_default; +// mod gather_fp16x16_3d_axis1; +// mod gather_fp16x16_3d_axis2; +// mod gather_negative_indices; +// mod gather_negative_axis; mod less_fp16x16; mod less_fp16x16_broadcast; mod less_fp8x23; @@ -955,33 +955,33 @@ mod less_i8; mod less_i8_broadcast; mod less_u32; mod less_u32_broadcast; -mod reshape_extended_dims; -mod reshape_negative_dim; -mod reshape_negative_extended_dims; -mod reshape_one_dim; -mod reshape_reduced_dims; -mod reshape_reordered_all_dims; -mod reshape_reordered_last_dims; -mod reshape_zero_and_negative_dim; -mod reshape_zero_dim; -mod reduce_sum_default_axes_keepdims; -mod reduce_sum_empty_axes_input_noop; -mod reduce_sum_keep_dims; -mod reduce_sum_negative_axes_keepdims; -mod reduce_sum_no_keep_dims; -mod gather_elements_default; -mod gather_elements_axis1; -mod gather_elements_axis2; -mod gather_elements_negative_indices; -mod softmax_axis_0; -mod softmax_axis_1; -mod softmax_axis_2; -mod softmax_axis_minus_1; -mod 
argmax_default_axes_keepdims; -mod argmax_default_axes_keepdims_select_last_index; -mod argmax_keepdims; -mod argmax_keepdims_select_last_index; -mod argmax_negative_axis_keepdims; -mod argmax_negative_axis_keepdims_select_last_index; -mod argmax_no_keepdims; -mod argmax_no_keepdims_select_last_index; +// mod reshape_extended_dims; +// mod reshape_negative_dim; +// mod reshape_negative_extended_dims; +// mod reshape_one_dim; +// mod reshape_reduced_dims; +// mod reshape_reordered_all_dims; +// mod reshape_reordered_last_dims; +// mod reshape_zero_and_negative_dim; +// mod reshape_zero_dim; +// mod reduce_sum_default_axes_keepdims; +// mod reduce_sum_empty_axes_input_noop; +// mod reduce_sum_keep_dims; +// mod reduce_sum_negative_axes_keepdims; +// mod reduce_sum_no_keep_dims; +// mod gather_elements_default; +// mod gather_elements_axis1; +// mod gather_elements_axis2; +// mod gather_elements_negative_indices; +// mod softmax_axis_0; +// mod softmax_axis_1; +// mod softmax_axis_2; +// mod softmax_axis_minus_1; +// mod argmax_default_axes_keepdims; +// mod argmax_default_axes_keepdims_select_last_index; +// mod argmax_keepdims; +// mod argmax_keepdims_select_last_index; +// mod argmax_negative_axis_keepdims; +// mod argmax_negative_axis_keepdims_select_last_index; +// mod argmax_no_keepdims; +// mod argmax_no_keepdims_select_last_index; diff --git a/tests/nodes/less_fp16x16.cairo b/tests/nodes/less_fp16x16.cairo index e81287d03..49840aa1a 100644 --- a/tests/nodes/less_fp16x16.cairo +++ b/tests/nodes/less_fp16x16.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; 
-use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp16x16/input_0.cairo b/tests/nodes/less_fp16x16/input_0.cairo index 0921e77c9..40500ebda 100644 --- a/tests/nodes/less_fp16x16/input_0.cairo +++ b/tests/nodes/less_fp16x16/input_0.cairo @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: 
true }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/input_1.cairo b/tests/nodes/less_fp16x16/input_1.cairo index 2951ab380..c28c2a921 100644 --- a/tests/nodes/less_fp16x16/input_1.cairo +++ b/tests/nodes/less_fp16x16/input_1.cairo @@ -12,30 +12,30 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, 
sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16/output_0.cairo b/tests/nodes/less_fp16x16/output_0.cairo index 188202362..d6da68806 100644 --- a/tests/nodes/less_fp16x16/output_0.cairo +++ b/tests/nodes/less_fp16x16/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - 
data.append(true); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast.cairo b/tests/nodes/less_fp16x16_broadcast.cairo index 20b4b839f..0673dc984 100644 --- a/tests/nodes/less_fp16x16_broadcast.cairo +++ b/tests/nodes/less_fp16x16_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::FP16x16TensorPartialEq; use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_fp16x16_broadcast/input_0.cairo index 6b2a71053..24f7ea113 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); 
data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 0, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_fp16x16_broadcast/input_1.cairo index a5cfb8938..9c396d8cc 100644 --- a/tests/nodes/less_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_fp16x16_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 196608, sign: true }); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_fp16x16_broadcast/output_0.cairo index b02badbac..4d540be9d 100644 --- a/tests/nodes/less_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_fp16x16_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + 
data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23.cairo b/tests/nodes/less_fp8x23.cairo index ff63fe9f2..5cee9f0a6 100644 --- a/tests/nodes/less_fp8x23.cairo +++ b/tests/nodes/less_fp8x23.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp8x23/input_0.cairo b/tests/nodes/less_fp8x23/input_0.cairo index aade41632..8100a6b6c 100644 --- a/tests/nodes/less_fp8x23/input_0.cairo +++ b/tests/nodes/less_fp8x23/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - 
data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git 
a/tests/nodes/less_fp8x23/input_1.cairo b/tests/nodes/less_fp8x23/input_1.cairo index 94dfe4f24..01c04d7c3 100644 --- a/tests/nodes/less_fp8x23/input_1.cairo +++ b/tests/nodes/less_fp8x23/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 8388608, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false 
}); + data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23/output_0.cairo b/tests/nodes/less_fp8x23/output_0.cairo index 24e0b32fa..d926a66e9 100644 --- a/tests/nodes/less_fp8x23/output_0.cairo +++ b/tests/nodes/less_fp8x23/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + 
data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast.cairo b/tests/nodes/less_fp8x23_broadcast.cairo index b1eae545f..65b5079f0 100644 --- a/tests/nodes/less_fp8x23_broadcast.cairo +++ b/tests/nodes/less_fp8x23_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::FP8x23TensorPartialEq; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_fp8x23_broadcast/input_0.cairo b/tests/nodes/less_fp8x23_broadcast/input_0.cairo index 841487689..86bd47a49 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_0.cairo @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: 
false }); data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 16777216, sign: false }); data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); + data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_fp8x23_broadcast/input_1.cairo index 20ee2633a..fa68c319a 100644 --- a/tests/nodes/less_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_fp8x23_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); - data.append(FP8x23 { 
mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: false }); + data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 25165824, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_fp8x23_broadcast/output_0.cairo b/tests/nodes/less_fp8x23_broadcast/output_0.cairo index 00ed53108..1f24d9fbf 100644 --- a/tests/nodes/less_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_fp8x23_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); 
TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32.cairo b/tests/nodes/less_i32.cairo index 9e003aec4..d68e38bc0 100644 --- a/tests/nodes/less_i32.cairo +++ b/tests/nodes/less_i32.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i32/input_0.cairo b/tests/nodes/less_i32/input_0.cairo index cbb18e299..78d199299 100644 --- a/tests/nodes/less_i32/input_0.cairo +++ b/tests/nodes/less_i32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-2); - data.append(0); - data.append(1); data.append(1); - data.append(1); - data.append(0); - data.append(-1); - data.append(0); + data.append(-3); data.append(-1); + data.append(-3); + data.append(2); data.append(1); data.append(-2); - data.append(0); data.append(-2); - data.append(0); - data.append(-1); data.append(-1); + data.append(2); data.append(-2); + data.append(0); data.append(2); + data.append(-3); + data.append(-1); data.append(-2); + data.append(-3); data.append(-1); - data.append(1); + data.append(0); data.append(-2); data.append(1); - data.append(-2); - data.append(-2); + data.append(-1); + data.append(2); data.append(1); + data.append(1); + data.append(2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/input_1.cairo b/tests/nodes/less_i32/input_1.cairo index bff10843b..3454aab15 100644 --- 
a/tests/nodes/less_i32/input_1.cairo +++ b/tests/nodes/less_i32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(1); data.append(2); data.append(0); - data.append(1); - data.append(-2); - data.append(-2); - data.append(2); - data.append(2); + data.append(0); data.append(2); + data.append(1); + data.append(-3); + data.append(1); + data.append(1); data.append(-1); data.append(0); + data.append(0); + data.append(-2); + data.append(-1); data.append(1); data.append(2); - data.append(-1); data.append(0); + data.append(2); + data.append(2); data.append(0); data.append(1); - data.append(2); - data.append(-2); - data.append(-3); - data.append(-1); - data.append(-3); + data.append(1); data.append(0); - data.append(-2); - data.append(-3); data.append(2); data.append(2); + data.append(-2); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32/output_0.cairo b/tests/nodes/less_i32/output_0.cairo index fa40143fd..8e38fd1fd 100644 --- a/tests/nodes/less_i32/output_0.cairo +++ b/tests/nodes/less_i32/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - 
data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast.cairo b/tests/nodes/less_i32_broadcast.cairo index 56bbe7722..437596143 100644 --- a/tests/nodes/less_i32_broadcast.cairo +++ b/tests/nodes/less_i32_broadcast.cairo @@ -3,13 +3,11 @@ mod input_1; mod output_0; -use orion::operators::tensor::{I32Tensor, I32TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; -use core::array::{ArrayTrait, SpanTrait}; -use orion::utils::{assert_eq, assert_seq_eq}; use orion::operators::tensor::{TensorTrait, Tensor}; +use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::I32TensorPartialEq; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i32_broadcast/input_0.cairo b/tests/nodes/less_i32_broadcast/input_0.cairo index fe220acc6..a4eacc274 100644 --- a/tests/nodes/less_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_i32_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(-3); + data.append(-1); + data.append(-1); + data.append(-1); data.append(1); data.append(2); + data.append(-3); data.append(-2); + data.append(-3); + 
data.append(-1); + data.append(-3); data.append(0); - data.append(-2); - data.append(1); - data.append(0); - data.append(1); - data.append(0); - data.append(2); - data.append(0); + data.append(-3); data.append(-1); data.append(-1); - data.append(-2); - data.append(1); - data.append(-2); - data.append(2); - data.append(2); data.append(2); data.append(0); + data.append(0); + data.append(-3); + data.append(-2); data.append(2); + data.append(-3); + data.append(-3); data.append(2); - data.append(0); data.append(-3); data.append(0); - data.append(1); + data.append(-3); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/input_1.cairo b/tests/nodes/less_i32_broadcast/input_1.cairo index b6b877733..eaf75c0c9 100644 --- a/tests/nodes/less_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_i32_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); + data.append(-3); + data.append(-3); data.append(-1); - data.append(-2); - data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i32_broadcast/output_0.cairo b/tests/nodes/less_i32_broadcast/output_0.cairo index 31e21d966..ba4559935 100644 --- a/tests/nodes/less_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_i32_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - 
data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8.cairo b/tests/nodes/less_i8.cairo index 0ccc89ef6..78f839f5b 100644 --- a/tests/nodes/less_i8.cairo +++ b/tests/nodes/less_i8.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i8/input_0.cairo b/tests/nodes/less_i8/input_0.cairo index bf97a41b0..1a4e34dba 100644 --- a/tests/nodes/less_i8/input_0.cairo +++ b/tests/nodes/less_i8/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - 
data.append(-3); - data.append(0); - data.append(-3); - data.append(-2); - data.append(2); - data.append(2); - data.append(2); - data.append(2); data.append(-2); + data.append(1); data.append(0); + data.append(-1); + data.append(1); + data.append(1); data.append(-2); data.append(2); - data.append(2); data.append(-2); + data.append(-3); data.append(-2); - data.append(0); - data.append(-2); + data.append(-1); data.append(-2); + data.append(-1); + data.append(2); data.append(0); - data.append(1); - data.append(-3); + data.append(-1); data.append(2); data.append(2); - data.append(0); data.append(1); - data.append(-2); + data.append(0); + data.append(2); + data.append(2); + data.append(-1); data.append(1); + data.append(-1); + data.append(-3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/input_1.cairo b/tests/nodes/less_i8/input_1.cairo index cf5a6620b..c9ff24a41 100644 --- a/tests/nodes/less_i8/input_1.cairo +++ b/tests/nodes/less_i8/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); + data.append(2); + data.append(1); + data.append(1); + data.append(1); data.append(-1); data.append(1); data.append(-3); - data.append(-1); - data.append(0); + data.append(-3); + data.append(2); + data.append(-3); + data.append(-2); data.append(-1); data.append(-2); data.append(1); - data.append(1); + data.append(-3); data.append(2); - data.append(-1); - data.append(-2); data.append(2); - data.append(-2); - data.append(1); data.append(1); - data.append(2); - data.append(-1); data.append(-3); data.append(2); + data.append(1); data.append(0); data.append(0); - data.append(-3); - data.append(-1); - data.append(-3); - data.append(1); - data.append(-1); + data.append(2); + data.append(-2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8/output_0.cairo b/tests/nodes/less_i8/output_0.cairo index 2795730dc..7c87f420d 100644 --- 
a/tests/nodes/less_i8/output_0.cairo +++ b/tests/nodes/less_i8/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast.cairo b/tests/nodes/less_i8_broadcast.cairo index f56369ba4..f44a3f78b 100644 --- a/tests/nodes/less_i8_broadcast.cairo +++ b/tests/nodes/less_i8_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::I8TensorPartialEq; use orion::operators::tensor::{I8Tensor, I8TensorAdd}; -use 
orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_i8_broadcast/input_0.cairo b/tests/nodes/less_i8_broadcast/input_0.cairo index da5b4c091..7bac81f85 100644 --- a/tests/nodes/less_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_i8_broadcast/input_0.cairo @@ -11,31 +11,31 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(2); - data.append(2); - data.append(1); - data.append(2); data.append(1); + data.append(-3); data.append(1); + data.append(-1); + data.append(-1); + data.append(2); + data.append(2); data.append(2); data.append(2); data.append(0); + data.append(-1); data.append(-3); - data.append(-2); - data.append(-2); - data.append(1); data.append(1); + data.append(-1); data.append(-3); - data.append(1); - data.append(1); - data.append(0); data.append(-1); - data.append(2); data.append(-2); + data.append(2); data.append(0); - data.append(-3); + data.append(1); + data.append(1); + data.append(2); + data.append(-1); data.append(2); data.append(-2); - data.append(0); data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_i8_broadcast/input_1.cairo b/tests/nodes/less_i8_broadcast/input_1.cairo index 01a2cdb2d..484627d12 100644 --- a/tests/nodes/less_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_i8_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(-3); + data.append(-2); data.append(-1); - data.append(-3); + data.append(-2); TensorTrait::new(shape.span(), data.span()) 
} diff --git a/tests/nodes/less_i8_broadcast/output_0.cairo b/tests/nodes/less_i8_broadcast/output_0.cairo index fd762de3a..aeda2f157 100644 --- a/tests/nodes/less_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_i8_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32.cairo b/tests/nodes/less_u32.cairo index 78019ea42..a48774701 100644 --- a/tests/nodes/less_u32.cairo +++ b/tests/nodes/less_u32.cairo @@ -3,13 +3,13 @@ mod input_1; mod 
output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_u32/input_0.cairo b/tests/nodes/less_u32/input_0.cairo index bc06571cc..d75275db5 100644 --- a/tests/nodes/less_u32/input_0.cairo +++ b/tests/nodes/less_u32/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(3); + data.append(3); data.append(4); data.append(1); - data.append(0); - data.append(0); + data.append(1); data.append(1); data.append(0); + data.append(4); data.append(1); data.append(4); - data.append(2); data.append(4); data.append(5); - data.append(0); - data.append(1); - data.append(5); - data.append(1); - data.append(5); data.append(4); + data.append(1); + data.append(2); data.append(3); - data.append(0); data.append(3); data.append(4); - data.append(4); + data.append(1); data.append(5); + data.append(1); + data.append(1); data.append(5); data.append(4); - data.append(0); + data.append(1); data.append(2); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/input_1.cairo b/tests/nodes/less_u32/input_1.cairo index df1accf66..65cf93685 100644 --- a/tests/nodes/less_u32/input_1.cairo +++ b/tests/nodes/less_u32/input_1.cairo @@ -10,32 +10,32 @@ fn input_1() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); - data.append(3); - data.append(4); - data.append(1); - data.append(3); data.append(1); + 
data.append(0); + data.append(2); data.append(5); - data.append(1); data.append(5); + data.append(2); + data.append(2); + data.append(2); + data.append(5); + data.append(4); + data.append(5); + data.append(0); data.append(3); + data.append(1); + data.append(0); data.append(0); data.append(2); - data.append(3); - data.append(4); + data.append(2); data.append(1); data.append(0); data.append(4); - data.append(5); - data.append(1); - data.append(2); + data.append(0); data.append(3); - data.append(5); data.append(0); - data.append(1); + data.append(0); + data.append(3); data.append(5); - data.append(1); - data.append(4); - data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32/output_0.cairo b/tests/nodes/less_u32/output_0.cairo index 9c6097ec0..44d3797af 100644 --- a/tests/nodes/less_u32/output_0.cairo +++ b/tests/nodes/less_u32/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(false); - data.append(true); - data.append(true); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(true); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(true); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + 
data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast.cairo b/tests/nodes/less_u32_broadcast.cairo index 28106718a..215eb22d0 100644 --- a/tests/nodes/less_u32_broadcast.cairo +++ b/tests/nodes/less_u32_broadcast.cairo @@ -3,13 +3,13 @@ mod input_1; mod output_0; -use orion::operators::tensor::BoolTensorPartialEq; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{TensorTrait, Tensor}; use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; #[test] #[available_gas(2000000000)] diff --git a/tests/nodes/less_u32_broadcast/input_0.cairo b/tests/nodes/less_u32_broadcast/input_0.cairo index dd6ff0d5a..738e0e9a7 100644 --- a/tests/nodes/less_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_u32_broadcast/input_0.cairo @@ -10,32 +10,32 @@ fn input_0() -> Tensor { shape.append(3); let mut data = ArrayTrait::new(); + data.append(2); + data.append(2); + data.append(2); + data.append(0); + data.append(5); + data.append(5); data.append(1); data.append(5); + data.append(5); + data.append(2); + data.append(4); data.append(4); data.append(3); - data.append(2); data.append(0); - data.append(3); + data.append(4); data.append(2); - data.append(5); data.append(1); data.append(3); - 
data.append(4); - data.append(2); - data.append(5); data.append(1); - data.append(0); + data.append(2); data.append(3); + data.append(1); data.append(3); data.append(4); data.append(2); + data.append(3); data.append(5); - data.append(4); - data.append(5); - data.append(5); - data.append(5); - data.append(1); - data.append(4); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/input_1.cairo b/tests/nodes/less_u32_broadcast/input_1.cairo index ece89984a..ddd3e1af4 100644 --- a/tests/nodes/less_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_u32_broadcast/input_1.cairo @@ -10,8 +10,8 @@ fn input_1() -> Tensor { shape.append(1); let mut data = ArrayTrait::new(); - data.append(3); data.append(0); - data.append(3); + data.append(4); + data.append(5); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_u32_broadcast/output_0.cairo b/tests/nodes/less_u32_broadcast/output_0.cairo index 1717e77f9..2496ad222 100644 --- a/tests/nodes/less_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_u32_broadcast/output_0.cairo @@ -1,40 +1,41 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::BoolTensor; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(3); shape.append(3); shape.append(3); let mut data = ArrayTrait::new(); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); - data.append(false); - 
data.append(false); - data.append(false); - data.append(false); - data.append(true); - data.append(false); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(1); + data.append(0); + data.append(0); + data.append(0); + data.append(1); + data.append(1); + data.append(0); + data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } From dc1dc2a3a944e1ffa75428df37cf36f324fbec9a Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 10:01:39 +0100 Subject: [PATCH 58/68] refactor doc --- docs/framework/operators/tensor/tensor.less.md | 10 +++++----- src/operators/tensor/core.cairo | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.less.md b/docs/framework/operators/tensor/tensor.less.md index 65f44ba41..4013e8d93 100644 --- a/docs/framework/operators/tensor/tensor.less.md +++ b/docs/framework/operators/tensor/tensor.less.md @@ -1,7 +1,7 @@ #tensor.less ```rust - fn less(self: @Tensor, other: @Tensor) -> Tensor; + fn less(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than the corresponding element of the second tensor. @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -43,7 +43,7 @@ fn less_example() -> Tensor { // We can call `less` function as follows. 
return tensor_1.less(@tensor_2); } ->>> [false,false,false,false,false,false,true,false,false] +>>> [0,0,0,0,0,0,1,0,0] ``` Case 2: Compare tensors with different shapes @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_example() -> Tensor { +fn less_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -63,5 +63,5 @@ fn less_example() -> Tensor { // We can call `less` function as follows. return tensor_1.less(@tensor_2); } ->>> [false,false,false,false,false,false,false,true,true] +>>> [0,0,0,0,0,0,0,1,1] ``` diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index 02ed17ae9..bbd2b4905 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1252,7 +1252,7 @@ trait TensorTrait { /// #tensor.less /// /// ```rust - /// fn less(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than the corresponding element of the second tensor. @@ -1282,7 +1282,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1294,7 +1294,7 @@ trait TensorTrait { /// // We can call `less` function as follows. 
/// return tensor_1.less(@tensor_2); /// } - /// >>> [false,false,false,false,false,false,true,false,false] + /// >>> [0,0,0,0,0,0,1,0,0] /// ``` /// /// Case 2: Compare tensors with different shapes @@ -1304,7 +1304,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_example() -> Tensor { + /// fn less_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1314,7 +1314,7 @@ trait TensorTrait { /// // We can call `less` function as follows. /// return tensor_1.less(@tensor_2); /// } - /// >>> [false,false,false,false,false,false,false,true,true] + /// >>> [0,0,0,0,0,0,0,1,1] /// ``` /// fn less(self: @Tensor, other: @Tensor) -> Tensor; From e7438faa6d9b00d6baf03bcb5d7ab9cf93e864e0 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 10:06:25 +0100 Subject: [PATCH 59/68] refactor operator --- src/operators/tensor/core.cairo | 2 +- src/operators/tensor/implementations/tensor_bool.cairo | 2 +- .../tensor/implementations/tensor_complex64.cairo | 2 +- src/operators/tensor/implementations/tensor_fp16x16.cairo | 2 +- .../tensor/implementations/tensor_fp16x16wide.cairo | 2 +- src/operators/tensor/implementations/tensor_fp32x32.cairo | 2 +- src/operators/tensor/implementations/tensor_fp64x64.cairo | 2 +- src/operators/tensor/implementations/tensor_fp8x23.cairo | 2 +- .../tensor/implementations/tensor_fp8x23wide.cairo | 2 +- src/operators/tensor/implementations/tensor_i32.cairo | 2 +- src/operators/tensor/implementations/tensor_i8.cairo | 2 +- src/operators/tensor/implementations/tensor_u32.cairo | 2 +- src/operators/tensor/math/less_equal.cairo | 7 +++---- 13 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index bbd2b4905..f604bb82f 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ 
-1386,7 +1386,7 @@ trait TensorTrait { /// >>> [1,1,1,0,0,0,1,1,1] /// ``` /// - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; /// #tensor.abs /// /// ```rust diff --git a/src/operators/tensor/implementations/tensor_bool.cairo b/src/operators/tensor/implementations/tensor_bool.cairo index 7e2421a7d..e8ca7e2d8 100644 --- a/src/operators/tensor/implementations/tensor_bool.cairo +++ b/src/operators/tensor/implementations/tensor_bool.cairo @@ -121,7 +121,7 @@ impl BoolTensor of TensorTrait { panic(array!['not supported!']) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_complex64.cairo b/src/operators/tensor/implementations/tensor_complex64.cairo index 5e4650df7..8acb0891e 100644 --- a/src/operators/tensor/implementations/tensor_complex64.cairo +++ b/src/operators/tensor/implementations/tensor_complex64.cairo @@ -138,7 +138,7 @@ impl Complex64Tensor of TensorTrait { panic(array!['not supported!']) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { panic(array!['not supported!']) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16.cairo b/src/operators/tensor/implementations/tensor_fp16x16.cairo index d5045d327..27f853df5 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16.cairo @@ -132,7 +132,7 @@ impl FP16x16Tensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo index 
42bf6e195..61485bae6 100644 --- a/src/operators/tensor/implementations/tensor_fp16x16wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp16x16wide.cairo @@ -142,7 +142,7 @@ impl FP16x16WTensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp32x32.cairo b/src/operators/tensor/implementations/tensor_fp32x32.cairo index 260e33acc..6ea3c7d94 100644 --- a/src/operators/tensor/implementations/tensor_fp32x32.cairo +++ b/src/operators/tensor/implementations/tensor_fp32x32.cairo @@ -129,7 +129,7 @@ impl FP32x32Tensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp64x64.cairo b/src/operators/tensor/implementations/tensor_fp64x64.cairo index 9427623fb..af955fff1 100644 --- a/src/operators/tensor/implementations/tensor_fp64x64.cairo +++ b/src/operators/tensor/implementations/tensor_fp64x64.cairo @@ -129,7 +129,7 @@ impl FP64x64Tensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23.cairo b/src/operators/tensor/implementations/tensor_fp8x23.cairo index d614fcb89..19681e641 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23.cairo @@ -129,7 +129,7 @@ impl FP8x23Tensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { 
math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo index 50000dcb8..ef65871d4 100644 --- a/src/operators/tensor/implementations/tensor_fp8x23wide.cairo +++ b/src/operators/tensor/implementations/tensor_fp8x23wide.cairo @@ -132,7 +132,7 @@ impl FP8x23WTensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i32.cairo b/src/operators/tensor/implementations/tensor_i32.cairo index 483845743..924a6b1fd 100644 --- a/src/operators/tensor/implementations/tensor_i32.cairo +++ b/src/operators/tensor/implementations/tensor_i32.cairo @@ -130,7 +130,7 @@ impl I32Tensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_i8.cairo b/src/operators/tensor/implementations/tensor_i8.cairo index 78d55c8d0..f523c47b2 100644 --- a/src/operators/tensor/implementations/tensor_i8.cairo +++ b/src/operators/tensor/implementations/tensor_i8.cairo @@ -127,7 +127,7 @@ impl I8Tensor of TensorTrait { math::less::less(self, other) } - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/implementations/tensor_u32.cairo b/src/operators/tensor/implementations/tensor_u32.cairo index 19292ae15..7aa2ade26 100644 --- a/src/operators/tensor/implementations/tensor_u32.cairo +++ b/src/operators/tensor/implementations/tensor_u32.cairo @@ -126,7 +126,7 @@ impl U32Tensor of TensorTrait { math::less::less(self, other) 
} - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor { math::less_equal::less_equal(self, other) } diff --git a/src/operators/tensor/math/less_equal.cairo b/src/operators/tensor/math/less_equal.cairo index 8c982a09c..dea786878 100644 --- a/src/operators/tensor/math/less_equal.cairo +++ b/src/operators/tensor/math/less_equal.cairo @@ -1,4 +1,4 @@ -use orion::operators::tensor::core::{Tensor, TensorTrait, unravel_index}; +use orion::operators::tensor::{core::{Tensor, TensorTrait, unravel_index}, I32Tensor}; use orion::operators::tensor::helpers::{ broadcast_shape, broadcast_index_mapping, len_from_shape, check_compatibility }; @@ -6,15 +6,14 @@ use orion::operators::tensor::helpers::{ /// Cf: TensorTrait::less_equal docstring fn less_equal< T, - impl UsizeFTensor: TensorTrait, impl TPartialOrd: PartialOrd, impl TCopy: Copy, impl TDrop: Drop >( y: @Tensor, z: @Tensor -) -> Tensor { +) -> Tensor { let broadcasted_shape = broadcast_shape(*y.shape, *z.shape); - let mut result: Array = array![]; + let mut result: Array = array![]; let num_elements = len_from_shape(broadcasted_shape); From 0b8f19cf20beabcfa56a51600afc4e7879c3decd Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 10:09:14 +0100 Subject: [PATCH 60/68] update tests --- nodegen/node/less_equal.py | 20 +- tests/nodes.cairo | 1954 ++++++++--------- tests/nodes/less_equal_fp16x16.cairo | 16 +- tests/nodes/less_equal_fp16x16/input_0.cairo | 10 +- tests/nodes/less_equal_fp16x16/input_1.cairo | 8 +- tests/nodes/less_equal_fp16x16/output_0.cairo | 9 +- .../nodes/less_equal_fp16x16_broadcast.cairo | 16 +- .../input_0.cairo | 8 +- .../input_1.cairo | 4 +- .../output_0.cairo | 9 +- tests/nodes/less_equal_fp8x23.cairo | 14 +- tests/nodes/less_equal_fp8x23/input_0.cairo | 8 +- tests/nodes/less_equal_fp8x23/input_1.cairo | 4 +- tests/nodes/less_equal_fp8x23/output_0.cairo | 9 +- tests/nodes/less_equal_fp8x23_broadcast.cairo | 14 +- 
.../less_equal_fp8x23_broadcast/input_0.cairo | 8 +- .../less_equal_fp8x23_broadcast/input_1.cairo | 4 +- .../output_0.cairo | 9 +- tests/nodes/less_equal_i32.cairo | 14 +- tests/nodes/less_equal_i32/input_0.cairo | 7 +- tests/nodes/less_equal_i32/input_1.cairo | 9 +- tests/nodes/less_equal_i32/output_0.cairo | 7 +- tests/nodes/less_equal_i32_broadcast.cairo | 14 +- .../less_equal_i32_broadcast/input_0.cairo | 9 +- .../less_equal_i32_broadcast/input_1.cairo | 7 +- .../less_equal_i32_broadcast/output_0.cairo | 5 +- tests/nodes/less_equal_i8.cairo | 16 +- tests/nodes/less_equal_i8/input_0.cairo | 5 +- tests/nodes/less_equal_i8/input_1.cairo | 7 +- tests/nodes/less_equal_i8/output_0.cairo | 7 +- tests/nodes/less_equal_i8_broadcast.cairo | 16 +- .../less_equal_i8_broadcast/input_0.cairo | 11 +- .../less_equal_i8_broadcast/input_1.cairo | 7 +- .../less_equal_i8_broadcast/output_0.cairo | 11 +- tests/nodes/less_equal_u32.cairo | 14 +- tests/nodes/less_equal_u32/input_0.cairo | 9 +- tests/nodes/less_equal_u32/input_1.cairo | 7 +- tests/nodes/less_equal_u32/output_0.cairo | 9 +- tests/nodes/less_equal_u32_broadcast.cairo | 14 +- .../less_equal_u32_broadcast/input_0.cairo | 7 +- .../less_equal_u32_broadcast/input_1.cairo | 7 +- .../less_equal_u32_broadcast/output_0.cairo | 9 +- 42 files changed, 1187 insertions(+), 1165 deletions(-) diff --git a/nodegen/node/less_equal.py b/nodegen/node/less_equal.py index c54040331..2a29d0816 100644 --- a/nodegen/node/less_equal.py +++ b/nodegen/node/less_equal.py @@ -13,7 +13,7 @@ def default(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_u32" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -25,7 +25,7 @@ def broadcast(): x = Tensor(Dtype.U32, x.shape, x.flatten()) y = Tensor(Dtype.U32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = 
Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_u32_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -42,7 +42,7 @@ def default(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i32" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -54,7 +54,7 @@ def broadcast(): x = Tensor(Dtype.I32, x.shape, x.flatten()) y = Tensor(Dtype.I32, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i32_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -71,7 +71,7 @@ def default(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i8" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -83,7 +83,7 @@ def broadcast(): x = Tensor(Dtype.I8, x.shape, x.flatten()) y = Tensor(Dtype.I8, y.shape, y.flatten()) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_i8_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -102,7 +102,7 @@ def default(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp8x23" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -116,7 +116,7 @@ def broadcast(): x.flatten(), FixedImpl.FP8x23)) y = Tensor(Dtype.FP8x23, y.shape, to_fp( y.flatten(), FixedImpl.FP8x23)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp8x23_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -135,7 +135,7 
@@ def default(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp16x16" make_test([x, y], z, "input_0.less_equal(@input_1)", name) @@ -149,7 +149,7 @@ def broadcast(): x.flatten(), FixedImpl.FP16x16)) y = Tensor(Dtype.FP16x16, y.shape, to_fp( y.flatten(), FixedImpl.FP16x16)) - z = Tensor(Dtype.U32, z.shape, z.flatten()) + z = Tensor(Dtype.I32, z.shape, z.flatten()) name = "less_equal_fp16x16_broadcast" make_test([x, y], z, "input_0.less_equal(@input_1)", name) diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 15249bc0d..244d8b0c9 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1,950 +1,950 @@ -// mod abs_fp16x16; -// mod abs_fp8x23; -// mod abs_i32; -// mod abs_i8; -// mod acos_fp16x16; -// mod acos_fp8x23; -// mod acosh_fp16x16; -// mod acosh_fp8x23; -// mod add_fp16x16; -// mod add_fp16x16_broadcast; -// mod add_fp8x23; -// mod add_fp8x23_broadcast; -// mod add_i32; -// mod add_i32_broadcast; -// mod add_i8; -// mod add_i8_broadcast; -// mod add_u32; -// mod add_u32_broadcast; -// mod argmin_fp16x16_1D_default; -// mod argmin_fp16x16_1D_keepdims_false; -// mod argmin_fp16x16_1D_last_index; -// mod argmin_fp16x16_2D_default; -// mod argmin_fp16x16_2D_keepdims_false; -// mod argmin_fp16x16_2D_last_index; -// mod argmin_fp16x16_3D_default; -// mod argmin_fp16x16_3D_keepdims_false; -// mod argmin_fp16x16_3D_last_index; -// mod argmin_fp8x23_1D_default; -// mod argmin_fp8x23_1D_keepdims_false; -// mod argmin_fp8x23_1D_last_index; -// mod argmin_fp8x23_2D_default; -// mod argmin_fp8x23_2D_keepdims_false; -// mod argmin_fp8x23_2D_last_index; -// mod argmin_fp8x23_3D_default; -// mod argmin_fp8x23_3D_keepdims_false; -// mod argmin_fp8x23_3D_last_index; -// mod argmin_i32_1D_default; -// mod argmin_i32_1D_keepdims_false; -// mod argmin_i32_1D_last_index; -// mod 
argmin_i32_2D_default; -// mod argmin_i32_2D_keepdims_false; -// mod argmin_i32_2D_last_index; -// mod argmin_i32_3D_default; -// mod argmin_i32_3D_keepdims_false; -// mod argmin_i32_3D_last_index; -// mod argmin_i8_1D_default; -// mod argmin_i8_1D_keepdims_false; -// mod argmin_i8_1D_last_index; -// mod argmin_i8_2D_default; -// mod argmin_i8_2D_keepdims_false; -// mod argmin_i8_2D_last_index; -// mod argmin_i8_3D_default; -// mod argmin_i8_3D_keepdims_false; -// mod argmin_i8_3D_last_index; -// mod argmin_u32_1D_default; -// mod argmin_u32_1D_keepdims_false; -// mod argmin_u32_1D_last_index; -// mod argmin_u32_2D_default; -// mod argmin_u32_2D_keepdims_false; -// mod argmin_u32_2D_last_index; -// mod argmin_u32_3D_default; -// mod argmin_u32_3D_keepdims_false; -// mod argmin_u32_3D_last_index; -// mod asin_fp16x16; -// mod asin_fp8x23; -// mod asinh_fp16x16; -// mod asinh_fp8x23; -// mod atan_fp16x16; -// mod atan_fp8x23; -// mod ceil_fp16x16; -// mod ceil_fp8x23; -// mod concat_fp16x16_1d; -// mod concat_fp16x16_2d; -// mod concat_fp16x16_3d_default; -// mod concat_fp16x16_3d_axis_1; -// mod concat_fp16x16_3d_axis_2; -// mod concat_fp16x16_3d_three_tensors_axis_1; -// mod concat_fp16x16_3d_three_tensors_axis_2; -// mod concat_fp8x23_1d; -// mod concat_fp8x23_2d; -// mod concat_fp8x23_3d_default; -// mod concat_fp8x23_3d_axis_1; -// mod concat_fp8x23_3d_axis_2; -// mod concat_fp8x23_3d_three_tensors_axis_1; -// mod concat_fp8x23_3d_three_tensors_axis_2; -// mod concat_i32_1d; -// mod concat_i32_2d; -// mod concat_i32_3d_default; -// mod concat_i32_3d_axis_1; -// mod concat_i32_3d_axis_2; -// mod concat_i32_3d_three_tensors_axis_1; -// mod concat_i32_3d_three_tensors_axis_2; -// mod concat_i8_1d; -// mod concat_i8_2d; -// mod concat_i8_3d_default; -// mod concat_i8_3d_axis_1; -// mod concat_i8_3d_axis_2; -// mod concat_i8_3d_three_tensors_axis_1; -// mod concat_i8_3d_three_tensors_axis_2; -// mod concat_u32_1d; -// mod concat_u32_2d; -// mod concat_u32_3d_default; 
-// mod concat_u32_3d_axis_1; -// mod concat_u32_3d_axis_2; -// mod concat_u32_3d_three_tensors_axis_1; -// mod concat_u32_3d_three_tensors_axis_2; -// mod cos_fp16x16; -// mod cos_fp8x23; -// mod cosh_fp16x16; -// mod cosh_fp8x23; -// mod cumsum_fp16x16_1d_default; -// mod cumsum_fp16x16_1d_exclusive; -// mod cumsum_fp16x16_1d_reverse; -// mod cumsum_fp16x16_1d_reverse_exclusive; -// mod cumsum_fp16x16_2d_axis_0; -// mod cumsum_fp16x16_2d_axis_1; -// mod cumsum_fp8x23_1d_default; -// mod cumsum_fp8x23_1d_exclusive; -// mod cumsum_fp8x23_1d_reverse; -// mod cumsum_fp8x23_1d_reverse_exclusive; -// mod cumsum_fp8x23_2d_axis_0; -// mod cumsum_fp8x23_2d_axis_1; -// mod cumsum_i32_1d_default; -// mod cumsum_i32_1d_exclusive; -// mod cumsum_i32_1d_reverse; -// mod cumsum_i32_1d_reverse_exclusive; -// mod cumsum_i32_2d_axis_0; -// mod cumsum_i32_2d_axis_1; -// mod cumsum_i8_1d_default; -// mod cumsum_i8_1d_exclusive; -// mod cumsum_i8_1d_reverse; -// mod cumsum_i8_1d_reverse_exclusive; -// mod cumsum_i8_2d_axis_0; -// mod cumsum_i8_2d_axis_1; -// mod cumsum_u32_1d_default; -// mod cumsum_u32_1d_exclusive; -// mod cumsum_u32_1d_reverse; -// mod cumsum_u32_1d_reverse_exclusive; -// mod cumsum_u32_2d_axis_0; -// mod cumsum_u32_2d_axis_1; -// mod div_fp16x16; -// mod div_fp16x16_broadcast; -// mod div_fp8x23; -// mod div_fp8x23_broadcast; -// mod div_i32; -// mod div_i32_broadcast; -// mod div_i8; -// mod div_i8_broadcast; -// mod div_u32; -// mod div_u32_broadcast; -// mod equal_fp16x16; -// mod equal_fp16x16_broadcast; -// mod equal_fp8x23; -// mod equal_fp8x23_broadcast; -// mod equal_i32; -// mod equal_i32_broadcast; -// mod equal_i8; -// mod equal_i8_broadcast; -// mod equal_u32; -// mod equal_u32_broadcast; -// mod exp_fp16x16; -// mod exp_fp8x23; -// mod less_equal_fp16x16; -// mod less_equal_fp16x16_broadcast; -// mod less_equal_fp8x23; -// mod less_equal_fp8x23_broadcast; -// mod less_equal_i32; -// mod less_equal_i32_broadcast; -// mod less_equal_i8; -// mod 
less_equal_i8_broadcast; -// mod less_equal_u32; -// mod less_equal_u32_broadcast; -// mod greater_fp16x16; -// mod greater_fp16x16_broadcast; -// mod greater_fp8x23; -// mod greater_fp8x23_broadcast; -// mod greater_i32; -// mod greater_i32_broadcast; -// mod greater_i8; -// mod greater_i8_broadcast; -// mod greater_u32; -// mod greater_u32_broadcast; -// mod leaky_relu_fp16x16; -// mod leaky_relu_fp8x23; -// mod linear_fp16x16; -// mod linear_fp8x23; -// mod linear_i32; -// mod linear_i8; -// mod linear_u32; -// mod log_fp16x16; -// mod log_fp8x23; -// mod logsoftmax_fp16x16_axis_0; -// mod logsoftmax_fp16x16_axis_1; -// mod logsoftmax_fp8x23_axis_0; -// mod logsoftmax_fp8x23_axis_1; -// mod matmul_fp16x16_1d; -// mod matmul_fp16x16_2x2; -// mod matmul_fp16x16_2x1; -// mod matmul_fp16x16_1x2; -// mod matmul_fp8x23_1d; -// mod matmul_fp8x23_2x2; -// mod matmul_fp8x23_2x1; -// mod matmul_fp8x23_1x2; -// mod matmul_i32_1d; -// mod matmul_i32_2x2; -// mod matmul_i32_2x1; -// mod matmul_i32_1x2; -// mod matmul_i8_1d; -// mod matmul_i8_2x2; -// mod matmul_i8_2x1; -// mod matmul_i8_1x2; -// mod matmul_u32_1d; -// mod matmul_u32_2x2; -// mod matmul_u32_2x1; -// mod matmul_u32_1x2; -// mod mul_fp16x16; -// mod mul_fp16x16_broadcast; -// mod mul_fp8x23; -// mod mul_fp8x23_broadcast; -// mod mul_i32; -// mod mul_i32_broadcast; -// mod mul_i8; -// mod mul_i8_broadcast; -// mod mul_u32; -// mod mul_u32_broadcast; -// mod or_fp16x16; -// mod or_fp16x16_broadcast; -// mod or_fp8x23; -// mod or_fp8x23_broadcast; -// mod or_i32; -// mod or_i32_broadcast; -// mod or_i8; -// mod or_i8_broadcast; -// mod or_u32; -// mod or_u32_broadcast; -// mod relu_fp16x16; -// mod relu_fp8x23; -// mod relu_i32; -// mod relu_i8; -// mod sigmoid_fp16x16; -// mod sigmoid_fp8x23; -// mod sin_fp16x16; -// mod sin_fp8x23; -// mod sinh_fp16x16; -// mod sinh_fp8x23; -// mod softplus_fp8x23; -// mod softplus_fp16x16; -// mod softsign_fp8x23; -// mod softsign_fp16x16; -// mod sqrt_fp16x16; -// mod 
sqrt_fp8x23; -// mod sub_fp16x16; -// mod sub_fp16x16_broadcast; -// mod sub_fp8x23; -// mod sub_fp8x23_broadcast; -// mod sub_i32; -// mod sub_i32_broadcast; -// mod sub_i8; -// mod sub_i8_broadcast; -// mod sub_u32; -// mod sub_u32_broadcast; -// mod tanh_fp16x16; -// mod tanh_fp8x23; -// mod transpose_fp16x16_2d; -// mod transpose_fp16x16_3d; -// mod transpose_fp8x23_2d; -// mod transpose_fp8x23_3d; -// mod transpose_i32_2d; -// mod transpose_i32_3d; -// mod transpose_i8_2d; -// mod transpose_i8_3d; -// mod transpose_u32_2d; -// mod transpose_u32_3d; -// mod xor_fp16x16; -// mod xor_fp16x16_broadcast; -// mod xor_fp8x23; -// mod xor_fp8x23_broadcast; -// mod xor_i32; -// mod xor_i32_broadcast; -// mod xor_i8; -// mod xor_i8_broadcast; -// mod xor_u32; -// mod xor_u32_broadcast; -// mod greater_equal_fp16x16; -// mod greater_equal_fp16x16_broadcast; -// mod greater_equal_fp8x23; -// mod greater_equal_fp8x23_broadcast; -// mod greater_equal_i32; -// mod greater_equal_i32_broadcast; -// mod greater_equal_i8; -// mod greater_equal_i8_broadcast; -// mod greater_equal_u32; -// mod greater_equal_u32_broadcast; -// mod slice_fp16x16_2d; -// mod slice_fp16x16_3d; -// mod slice_fp8x23_2d; -// mod slice_fp8x23_3d; -// mod slice_i32_2d; -// mod slice_i32_3d; -// mod slice_i8_2d; -// mod slice_i8_3d; -// mod slice_u32_2d; -// mod slice_u32_3d; -// mod nonzero_fp16x16_2d; -// mod nonzero_fp16x16_3d; -// mod nonzero_fp8x23_2d; -// mod nonzero_fp8x23_3d; -// mod nonzero_i32_2d; -// mod nonzero_i32_3d; -// mod nonzero_i8_2d; -// mod nonzero_i8_3d; -// mod nonzero_u32_2d; -// mod nonzero_u32_3d; -// mod squeeze_fP16x16; -// mod squeeze_fP8x23; -// mod squeeze_i32; -// mod squeeze_i8; -// mod squeeze_u32; -// mod unsqueeze_fp16x16_2d; -// mod unsqueeze_fp16x16_3d; -// mod unsqueeze_fp8x23_2d; -// mod unsqueeze_fp8x23_3d; -// mod unsqueeze_i32_2d; -// mod unsqueeze_i32_3d; -// mod unsqueeze_i8_2d; -// mod unsqueeze_i8_3d; -// mod unsqueeze_u32_2d; -// mod unsqueeze_u32_3d; -// mod 
sign_fP16x16; -// mod sign_fP8x23; -// mod sign_fail; -// mod sign_i32; -// mod sign_i8; -// mod clip_fp16x16_2d; -// mod clip_fp16x16_3d; -// mod clip_fp8x23_2d; -// mod clip_fp8x23_3d; -// mod clip_i32_2d; -// mod clip_i32_3d; -// mod clip_i8_2d; -// mod clip_i8_3d; -// mod clip_u32_2d; -// mod clip_u32_3d; -// mod identity_fP16x16; -// mod identity_fP8x23; -// mod identity_i32; -// mod identity_i8; -// mod identity_u32; -// mod thresholded_relu_fp16x16; -// mod thresholded_relu_fp8x23; -// mod hard_sigmoid_fp8x23; -// mod hard_sigmoid_fp16x16; -// mod neg_fp16x16; -// mod neg_fp8x23; -// mod neg_i32; -// mod neg_i8; -// mod gemm_all_attributes; -// mod gemm_alpha; -// mod gemm_beta; -// mod gemm_default_matrix_bias; -// mod gemm_default_vector_bias; -// mod gemm_default_no_bias; -// mod gemm_transposeA; -// mod gemm_transposeB; -// mod min_fp16x16_three_tensors; -// mod min_fp16x16_broadcast_three_tensors; -// mod min_fp16x16_two_tensors; -// mod min_fp16x16_broadcast_two_tensors; -// mod min_fp8x23_three_tensors; -// mod min_fp8x23_broadcast_three_tensors; -// mod min_fp8x23_two_tensors; -// mod min_fp8x23_broadcast_two_tensors; -// mod min_i32_three_tensors; -// mod min_i32_broadcast_three_tensors; -// mod min_i32_two_tensors; -// mod min_i32_broadcast_two_tensors; -// mod min_i8_three_tensors; -// mod min_i8_broadcast_three_tensors; -// mod min_i8_two_tensors; -// mod min_i8_broadcast_two_tensors; -// mod min_u32_three_tensors; -// mod min_u32_broadcast_three_tensors; -// mod min_u32_two_tensors; -// mod min_u32_broadcast_two_tensors; -// mod where_fp16x16; -// mod where_fp16x16_broadcast; -// mod where_fp8x23; -// mod where_fp8x23_broadcast; -// mod where_i32; -// mod where_i32_broadcast; -// mod where_i8; -// mod where_i8_broadcast; -// mod where_u32; -// mod where_u32_broadcast; -// mod not_bool; -// mod round_fp16x16; -// mod round_fp8x23; -// mod max_fp16x16_three_tensors; -// mod max_fp16x16_broadcast_three_tensors; -// mod max_fp16x16_two_tensors; -// 
mod max_fp16x16_broadcast_two_tensors; -// mod max_fp8x23_three_tensors; -// mod max_fp8x23_broadcast_three_tensors; -// mod max_fp8x23_two_tensors; -// mod max_fp8x23_broadcast_two_tensors; -// mod max_i32_three_tensors; -// mod max_i32_broadcast_three_tensors; -// mod max_i32_two_tensors; -// mod max_i32_broadcast_two_tensors; -// mod max_i8_three_tensors; -// mod max_i8_broadcast_three_tensors; -// mod max_i8_two_tensors; -// mod max_i8_broadcast_two_tensors; -// mod max_u32_three_tensors; -// mod max_u32_broadcast_three_tensors; -// mod max_u32_two_tensors; -// mod max_u32_broadcast_two_tensors; -// mod scatter_fp16x16_3d_default; -// mod scatter_fp16x16_3d_axis1; -// mod scatter_fp16x16_3d_axis1_add; -// mod scatter_fp8x23_default; -// mod scatter_fp8x23_axis1; -// mod scatter_fp8x23_mul; -// mod scatter_i8_default; -// mod scatter_i8_axis1; -// mod scatter_i8_axis1_max; -// mod scatter_u32_default; -// mod scatter_u32_axis1; -// mod scatter_u32_add; -// mod array_feature_extractor_1D_i32; -// mod array_feature_extractor_1D_fp8x23; -// mod array_feature_extractor_1D_fp16x16; -// mod array_feature_extractor_2D_i32; -// mod array_feature_extractor_2D_fp8x23; -// mod array_feature_extractor_2D_fp16x16; -// mod array_feature_extractor_3D_i32; -// mod array_feature_extractor_3D_fp8x23; -// mod array_feature_extractor_3D_fp16x16; -// mod binarizer_fp16x16; -// mod binarizer_fp8x23; -// mod tril_fp16x16; -// mod tril_fp16x16_neg; -// mod tril_fp16x16_one_row; -// mod tril_fp16x16_out_neg; -// mod tril_fp16x16_out_pos; -// mod tril_fp16x16_pos; -// mod tril_fp16x16_square; -// mod tril_fp16x16_square_neg; -// mod tril_fp16x16_zero; -// mod triu_fp16x16; -// mod triu_fp16x16_neg; -// mod triu_fp16x16_one_row; -// mod triu_fp16x16_out_neg; -// mod triu_fp16x16_out_pos; -// mod triu_fp16x16_pos; -// mod triu_fp16x16_square; -// mod triu_fp16x16_square_neg; -// mod triu_fp16x16_zero; -// mod tril_fp8x23; -// mod tril_fp8x23_neg; -// mod tril_fp8x23_one_row; -// mod 
tril_fp8x23_out_neg; -// mod tril_fp8x23_out_pos; -// mod tril_fp8x23_pos; -// mod tril_fp8x23_square; -// mod tril_fp8x23_square_neg; -// mod tril_fp8x23_zero; -// mod triu_fp8x23; -// mod triu_fp8x23_neg; -// mod triu_fp8x23_one_row; -// mod triu_fp8x23_out_neg; -// mod triu_fp8x23_out_pos; -// mod triu_fp8x23_pos; -// mod triu_fp8x23_square; -// mod triu_fp8x23_square_neg; -// mod triu_fp8x23_zero; -// mod tril_i32; -// mod tril_neg_i32; -// mod tril_i32_one_row; -// mod tril_i32_out_neg; -// mod tril_i32_out_pos; -// mod tril_i32_pos; -// mod tril_i32_square; -// mod tril_i32_square_neg; -// mod tril_i32_zero; -// mod triu_i32; -// mod triu_i32_neg; -// mod triu_i32_one_row; -// mod triu_i32_out_neg; -// mod triu_i32_out_pos; -// mod triu_i32_pos; -// mod triu_i32_square; -// mod triu_i32_square_neg; -// mod triu_i32_zero; -// mod tril_i8; -// mod tril_i8_neg; -// mod tril_i8_one_row; -// mod tril_i8_out_neg; -// mod tril_i8_out_pos; -// mod tril_i8_pos; -// mod tril_i8_square; -// mod tril_i8_square_neg; -// mod tril_i8_zero; -// mod triu_i8; -// mod triu_i8_neg; -// mod triu_i8_one_row; -// mod triu_i8_out_neg; -// mod triu_i8_out_pos; -// mod triu_i8_pos; -// mod triu_i8_square; -// mod triu_i8_square_neg; -// mod triu_i8_zero; -// mod tril_u32; -// mod tril_u32_neg; -// mod tril_u32_one_row; -// mod tril_u32_out_neg; -// mod tril_u32_out_pos; -// mod tril_u32_pos; -// mod tril_u32_square; -// mod tril_u32_square_neg; -// mod tril_u32_zero; -// mod triu_u32; -// mod triu_u32_neg; -// mod triu_u32_one_row; -// mod triu_u32_out_neg; -// mod triu_u32_out_pos; -// mod triu_u32_pos; -// mod triu_u32_square; -// mod triu_u32_square_neg; -// mod triu_u32_zero; -// mod reduce_sum_square_fp16x16_export_do_not_keepdims; -// mod reduce_sum_square_fp16x16_export_keepdims; -// mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; -// mod reduce_sum_square_fp8x23_export_do_not_keepdims; -// mod reduce_sum_square_fp8x23_export_keepdims; -// mod 
reduce_sum_square_fp8x23_export_negative_axes_keepdims; -// mod reduce_sum_square_i32_export_do_not_keepdims; -// mod reduce_sum_square_i32_export_keepdims; -// mod reduce_sum_square_i32_export_negative_axes_keepdims; -// mod reduce_sum_square_i8_export_do_not_keepdims; -// mod reduce_sum_square_i8_export_keepdims; -// mod reduce_sum_square_i8_export_negative_axes_keepdims; -// mod reduce_sum_square_u32_export_do_not_keepdims; -// mod reduce_sum_square_u32_export_keepdims; -// mod reduce_sum_square_u32_export_negative_axes_keepdims; -// mod reduce_l2_fp16x16_export_do_not_keepdims; -// mod reduce_l2_fp16x16_export_keepdims; -// mod reduce_l2_fp16x16_export_negative_axes_keepdims; -// mod reduce_l2_fp8x23_export_do_not_keepdims; -// mod reduce_l2_fp8x23_export_keepdims; -// mod reduce_l2_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_fp16x16_export_do_not_keepdims; -// mod reduce_l1_fp16x16_export_keepdims; -// mod reduce_l1_fp16x16_export_negative_axes_keepdims; -// mod reduce_l1_fp8x23_export_do_not_keepdims; -// mod reduce_l1_fp8x23_export_keepdims; -// mod reduce_l1_fp8x23_export_negative_axes_keepdims; -// mod reduce_l1_i32_export_do_not_keepdims; -// mod reduce_l1_i32_export_keepdims; -// mod reduce_l1_i32_export_negative_axes_keepdims; -// mod reduce_l1_i8_export_do_not_keepdims; -// mod reduce_l1_i8_export_keepdims; -// mod reduce_l1_i8_export_negative_axes_keepdims; -// mod reduce_l1_u32_export_do_not_keepdims; -// mod reduce_l1_u32_export_keepdims; -// mod reduce_l1_u32_export_negative_axes_keepdims; -// mod reduce_prod_fp16x16_1D; -// mod reduce_prod_fp16x16_2D_default; -// mod reduce_prod_fp16x16_2D_keepdims; -// mod reduce_prod_fp16x16_2D_axis_1; -// mod reduce_prod_fp8x23_1D; -// mod reduce_prod_fp8x23_2D_default; -// mod reduce_prod_fp8x23_2D_keepdims; -// mod reduce_prod_fp8x23_2D_axis_1; -// mod reduce_prod_i32_1D; -// mod reduce_prod_i32_2D_default; -// mod reduce_prod_i32_2D_keepdims; -// mod reduce_prod_i32_2D_axis_1; -// mod 
reduce_prod_i8_1D; -// mod reduce_prod_i8_2D_default; -// mod reduce_prod_i8_2D_keepdims; -// mod reduce_prod_i8_2D_axis_1; -// mod reduce_prod_u32_1D; -// mod reduce_prod_u32_2D_default; -// mod reduce_prod_u32_2D_keepdims; -// mod reduce_prod_u32_2D_axis_1; -// mod sequence_length_fp16x16; -// mod sequence_length_fp16x16_broadcast; -// mod sequence_length_fp8x23; -// mod sequence_length_fp8x23_broadcast; -// mod sequence_length_i32; -// mod sequence_length_i32_broadcast; -// mod sequence_length_i8; -// mod sequence_length_i8_broadcast; -// mod sequence_length_u32; -// mod sequence_length_u32_broadcast; -// mod sequence_at_u32_positive; -// mod sequence_at_u32_negative; -// mod sequence_at_fp16x16_positive; -// mod sequence_at_fp16x16_negative; -// mod sequence_at_fp8x23_positive; -// mod sequence_at_fp8x23_negative; -// mod sequence_at_i32_positive; -// mod sequence_at_i32_negative; -// mod sequence_at_i8_positive; -// mod sequence_at_i8_negative; -// mod reduce_min_fp16x16_1D; -// mod reduce_min_fp16x16_2D_default; -// mod reduce_min_fp16x16_2D_keepdims; -// mod reduce_min_fp16x16_2D_axis_1; -// mod reduce_min_fp8x23_1D; -// mod reduce_min_fp8x23_2D_default; -// mod reduce_min_fp8x23_2D_keepdims; -// mod reduce_min_fp8x23_2D_axis_1; -// mod reduce_min_i32_1D; -// mod reduce_min_i32_2D_default; -// mod reduce_min_i32_2D_keepdims; -// mod reduce_min_i32_2D_axis_1; -// mod reduce_min_i8_1D; -// mod reduce_min_i8_2D_default; -// mod reduce_min_i8_2D_keepdims; -// mod reduce_min_i8_2D_axis_1; -// mod reduce_min_u32_1D; -// mod reduce_min_u32_2D_default; -// mod reduce_min_u32_2D_keepdims; -// mod reduce_min_u32_2D_axis_1; -// mod sequence_construct_fp16x16; -// mod sequence_construct_fp8x23; -// mod sequence_construct_i32; -// mod sequence_construct_i8; -// mod sequence_construct_u32; -// mod shrink_hard_fp16x16; -// mod shrink_soft_fp16x16; -// mod shrink_hard_fp8x23; -// mod shrink_soft_fp8x23; -// mod sequence_empty_fp16x16; -// mod sequence_empty_fp8x23; -// mod 
sequence_empty_i32; -// mod sequence_empty_i8; -// mod sequence_empty_u32; -// mod reduce_mean_fp16x16_1D; -// mod reduce_mean_fp16x16_2D_default; -// mod reduce_mean_fp16x16_2D_keepdims; -// mod reduce_mean_fp16x16_2D_axis_1; -// mod reduce_mean_fp8x23_1D; -// mod reduce_mean_fp8x23_2D_default; -// mod reduce_mean_fp8x23_2D_keepdims; -// mod reduce_mean_fp8x23_2D_axis_1; -// mod reduce_mean_i32_1D; -// mod reduce_mean_i32_2D_default; -// mod reduce_mean_i32_2D_keepdims; -// mod reduce_mean_i32_2D_axis_1; -// mod reduce_mean_i8_1D; -// mod reduce_mean_i8_2D_default; -// mod reduce_mean_i8_2D_keepdims; -// mod reduce_mean_i8_2D_axis_1; -// mod reduce_mean_u32_1D; -// mod reduce_mean_u32_2D_default; -// mod reduce_mean_u32_2D_keepdims; -// mod reduce_mean_u32_2D_axis_1; -// mod pow_fp16x16; -// mod pow_fp16x16_broadcast; -// mod pow_fp8x23; -// mod pow_fp8x23_broadcast; -// mod sequence_erase_u32_positive; -// mod sequence_erase_u32_negative; -// mod sequence_erase_u32_empty; -// mod sequence_erase_fp16x16_positive; -// mod sequence_erase_fp16x16_negative; -// mod sequence_erase_fp16x16_empty; -// mod sequence_erase_fp8x23_positive; -// mod sequence_erase_fp8x23_negative; -// mod sequence_erase_fp8x23_empty; -// mod sequence_erase_i32_positive; -// mod sequence_erase_i32_negative; -// mod sequence_erase_i32_empty; -// mod sequence_erase_i8_positive; -// mod sequence_erase_i8_negative; -// mod sequence_erase_i8_empty; -// mod sequence_insert_fp16x16; -// mod sequence_insert_fp8x23; -// mod sequence_insert_i32; -// mod sequence_insert_i8; -// mod sequence_insert_u32; -// mod concat_from_sequence_fp8x23_new_axis_zero; -// mod concat_from_sequence_fp8x23_new_axis_one; -// mod concat_from_sequence_fp8x23_new_axis_default; -// mod concat_from_sequence_fp16x16_new_axis_zero; -// mod concat_from_sequence_fp16x16_new_axis_one; -// mod concat_from_sequence_fp16x16_new_axis_default; -// mod concat_from_sequence_i32_new_axis_zero; -// mod concat_from_sequence_i32_new_axis_one; 
-// mod concat_from_sequence_i32_new_axis_default; -// mod concat_from_sequence_i8_new_axis_zero; -// mod concat_from_sequence_i8_new_axis_one; -// mod concat_from_sequence_i8_new_axis_default; -// mod concat_from_sequence_u32_new_axis_zero; -// mod concat_from_sequence_u32_new_axis_one; -// mod concat_from_sequence_u32_new_axis_default; -// mod is_nan_fp16x16; -// mod is_nan_fp8x23; -// mod is_inf_fp16x16; -// mod is_inf_fp8x23; -// mod is_inf_i32; -// mod is_inf_i8; -// mod is_inf_u32; -// mod is_pos_inf_fp16x16; -// mod is_neg_inf_fp16x16; -// mod is_pos_inf_fp8x23; -// mod is_neg_inf_fp8x23; -// mod is_pos_inf_i32; -// mod is_neg_inf_i32; -// mod is_pos_inf_i8; -// mod is_neg_inf_i8; -// mod reduce_log_sum_fp8x23_export_do_not_keepdims; -// mod reduce_log_sum_fp8x23_export_keepdims; -// mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; -// mod reduce_log_sum_fp16x16_export_do_not_keepdims; -// mod reduce_log_sum_fp16x16_export_keepdims; -// mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; -// mod and_bool; -// mod erf_fp16x16; -// mod erf_fp8x23; -// mod unique_fp16x16_without_axis_sorted; -// mod unique_fp16x16_with_axis_zero_sorted; -// mod unique_u32_without_axis_sorted; -// mod unique_u32_without_axis_not_sorted; -// mod unique_u32_with_axis_zero_sorted; -// mod unique_u32_with_axis_zero_not_sorted; -// mod unique_u32_with_axis_one_sorted; -// mod unique_u32_with_axis_one_not_sorted; -// mod gather_nd_fp16x16_3d_default; -// mod gather_nd_fp16x16_3d_batch_dims1; -// mod gather_nd_fp16x16_3d_batch_dims2; -// mod gather_nd_fp8x23_3d_default; -// mod gather_nd_fp8x23_3d_batch_dims1; -// mod gather_nd_fp8x23_3d_batch_dims2; -// mod gather_nd_i32_3d_default; -// mod gather_nd_i32_3d_batch_dims1; -// mod gather_nd_i32_3d_batch_dims2; -// mod gather_nd_i8_3d_default; -// mod gather_nd_i8_3d_batch_dims1; -// mod gather_nd_u32_default; -// mod gather_nd_u32_batch_dims1; -// mod gather_nd_u32_batch_dims2; -// mod resize_upsample_scales_nearest; -// 
mod resize_downsample_scales_cubic; -// mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_downsample_scales_cubic_align_corners; -// mod resize_upsample_scales_linear; -// mod resize_downsample_scales_linear_align_corners; -// mod resize_downsample_scales_nearest; -// mod resize_upsample_scales_cubic; -// mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; -// mod resize_upsample_scales_cubic_align_corners; -// mod resize_upsample_scales_cubic_asymmetric; -// mod resize_upsample_scales_linear_align_corners; -// mod resize_upsample_sizes_nearest; -// mod resize_upsample_sizes_cubic; -// mod resize_downsample_sizes_cubic; -// mod resize_downsample_sizes_nearest; -// mod resize_upsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_scales_cubic_antialias; -// mod resize_downsample_scales_linear_antialias; -// mod resize_downsample_sizes_cubic_antialias; -// mod resize_downsample_sizes_linear_pytorch_half_pixel; -// mod resize_tf_crop_and_resize; -// mod resize_tf_crop_and_resize_extrapolation_value; -// mod resize_upsample_scales_nearest_axes_2_3; -// mod resize_upsample_scales_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_2_3; -// mod resize_upsample_sizes_nearest_ceil_half_pixel; -// mod resize_upsample_sizes_nearest_floor_align_corners; -// mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; -// mod resize_downsample_scales_linear_half_pixel_symmetric; -// mod resize_downsample_sizes_nearest_not_larger; -// mod resize_downsample_sizes_nearest_not_smaller; -// mod resize_tf_crop_and_resize_axes_2_3; -// mod resize_tf_crop_and_resize_axes_3_2; -// mod resize_upsample_sizes_nearest_axes_3_2; -// mod resize_upsample_sizes_nearest_not_larger; -// mod resize_upsample_sizes_nearest_not_smaller; -// mod compress_fp16x16_3d_default; -// mod compress_fp16x16_3d_axis1; -// mod compress_fp16x16_3d_axis2; -// mod compress_fp16x16_3d_axis3; -// mod compress_fp16x16_3d_noaxis; -// mod 
compress_fp8x23_3d_default; -// mod compress_fp8x23_3d_axis1; -// mod compress_fp8x23_3d_axis2; -// mod compress_i32_3d_default; -// mod compress_i32_3d_axis1; -// mod compress_i32_3d_axis2; -// mod compress_i8_3d_default; -// mod compress_i8_3d_axis1; -// mod compress_i8_3d_axis2; -// mod compress_u32_3d_default; -// mod compress_u32_3d_axis1; -// mod compress_u32_3d_axis2; -// mod compress_u32_3d_axis2_2; -// mod compress_u32_3d_axis3; -// mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_keepdims; -// mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; -// mod layer_normalization_default_axis; -// mod layer_normalization_4d_axis0; -// mod layer_normalization_4d_axis1; -// mod layer_normalization_4d_axis2; -// mod layer_normalization_4d_axis3; -// mod layer_normalization_3d_axis0_epsilon; -// mod layer_normalization_3d_axis_negative_3_epsilon; -// mod layer_normalization_3d_axis1_epsilon; -// mod layer_normalization_3d_axis2_epsilon; -// mod layer_normalization_4d_axis_negative_4; -// mod layer_normalization_4d_axis_negative_3; -// mod layer_normalization_4d_axis_negative_2; -// mod layer_normalization_4d_axis_negative_1; -// mod layer_normalization_3d_axis_negative_2_epsilon; -// mod layer_normalization_3d_axis_negative_1_epsilon; -// mod layer_normalization_test; -// mod split_u32_1d_equal_parts; -// mod split_u32_2d_equal_parts; -// mod split_u32_zero_size; -// mod split_u32_1d_variable_parts; -// mod split_u32_2d_variable_parts; -// mod split_u32_1d_uneven; -// mod split_u32_2d_uneven; -// mod split_fp16x16_1d_equal_parts; -// mod split_fp16x16_1d_variable_parts; -// mod split_fp16x16_2d_equal_parts; -// mod split_fp16x16_2d_variable_parts; -// mod split_fp16x16_zero_size; -// mod split_fp16x16_1d_uneven; -// mod split_fp16x16_2d_uneven; -// mod grid_sample; -// mod grid_sample_cubic; -// mod grid_sample_aligncorners; -// mod grid_sample_nearest; -// mod grid_sample_nearest_aligncorner; -// mod 
grid_sample_padding_border; -// mod grid_sample_padding_reflection; -// mod grid_sample_padding_zeros; -// mod col2im; -// mod col2im_5D; -// mod col2im_dilations; -// mod col2im_pads; -// mod col2im_strides; -// mod random_uniform_like_fp16x16; -// mod random_uniform_like_fp8x23; -// mod range_fp8x23; -// mod range_fp16x16; -// mod range_i32; -// mod range_i8; -// mod range_u32; -// mod hann_window_fp8x23; -// mod hann_window_fp16x16; -// mod hamming_window_fp16x16; -// mod hamming_window_fp8x23; -// mod blackman_window_fp16x16; -// mod blackman_window_fp8x23; -// mod split_to_sequence_fp16x16_1d_equal_parts; -// mod split_to_sequence_fp16x16_1d_variable_parts; -// mod split_to_sequence_fp16x16_2d_equal_parts; -// mod split_to_sequence_fp16x16_2d_variable_parts; -// mod split_to_sequence_fp16x16_zero_size; -// mod split_to_sequence_fp16x16_1d_uneven; -// mod split_to_sequence_fp16x16_2d_uneven; -// mod split_to_sequence_u32_1d_equal_parts; -// mod split_to_sequence_u32_1d_variable_parts; -// mod split_to_sequence_u32_2d_equal_parts; -// mod split_to_sequence_u32_2d_variable_parts; -// mod split_to_sequence_u32_zero_size; -// mod split_to_sequence_u32_1d_uneven; -// mod split_to_sequence_u32_2d_uneven; -// mod split_to_sequence_2d_scalar; -// mod split_to_sequence_2d_nokeepdims; -// mod split_to_sequence_1d_nokeepdims; -// mod reverse_sequence_fp16x16_batch_equal_parts; -// mod reverse_sequence_fp16x16_time_equal_parts; -// mod reverse_sequence_i32_batch_equal_parts; -// mod reverse_sequence_i32_time_equal_parts; -// mod reverse_sequence_i8_batch_equal_parts; -// mod reverse_sequence_i8_time_equal_parts; -// mod reverse_sequence_u32_4x4_batch; -// mod reverse_sequence_u32_4x4_time; -// mod reverse_sequence_u32_3x3_batch; -// mod reverse_sequence_u32_3x3_time; -// mod reverse_sequence_different_dimensions_4_5; -// mod reverse_sequence_different_dimensions_2_4; -// mod reverse_sequence_different_dimensions_1_6; -// mod reverse_sequence_different_dimensions_3x9_batch; 
-// mod reverse_sequence_different_dimensions_3x9_time; -// mod conv_transpose; -// mod conv_transpose_1d; -// mod conv_transpose_3d; -// mod conv_transpose_attributes; -// mod conv_transpose_autopad_same; -// mod conv_transpose_dilations; -// mod conv_transpose_pads; -// mod conv_transpose_group_2; -// mod conv_transpose_group_2_image_3; -// mod depth_to_space_fp16x16; -// mod depth_to_space_fp8x23; -// mod depth_to_space_i32; -// mod depth_to_space_i8; -// mod depth_to_space_u32; -// mod space_to_depth_fp16x16; -// mod space_to_depth_fp8x23; -// mod space_to_depth_i32; -// mod space_to_depth_i8; -// mod space_to_depth_u32; -// mod scatter_nd_fp16x16_3d_default; -// mod scatter_nd_fp16x16_3d_add; -// mod scatter_nd_fp16x16_3d_mul; -// mod scatter_nd_fp16x16_3d_max; -// mod scatter_nd_fp16x16_3d_min; -// mod scatter_nd_fp8x23_3d_default; -// mod scatter_nd_fp8x23_3d_add; -// mod scatter_nd_fp8x23_3d_mul; -// mod scatter_nd_fp8x23_3d_max; -// mod scatter_nd_fp8x23_3d_min; -// mod scatter_nd_u32_default; -// mod scatter_nd_u32_add; -// mod scatter_nd_u32_mul; -// mod scatter_nd_u32_max; -// mod scatter_nd_u32_min; -// mod conv_2D_with_padding; -// mod conv_1D_no_padding; -// mod conv_1D_with_padding; -// mod conv_3D_no_padding; -// mod conv_3D_with_padding; -// mod conv_4D_no_padding; -// mod conv_2D_with_2_groups; -// mod conv_2D_with_autopad_same; -// mod conv_2D_with_strides_asymmetric_padding; -// mod conv_2D_with_strides_with_padding; -// mod conv_4D_with_padding; -// mod label_encoder_fp16x16_3d_default; -// mod label_encoder_fp8x23_default; -// mod label_encoder_i8_default; -// mod label_encoder_i32_default; -// mod label_encoder_u32_default; -// mod gather_fp16x16_3d_default; -// mod gather_fp16x16_3d_axis1; -// mod gather_fp16x16_3d_axis2; -// mod gather_negative_indices; -// mod gather_negative_axis; +mod abs_fp16x16; +mod abs_fp8x23; +mod abs_i32; +mod abs_i8; +mod acos_fp16x16; +mod acos_fp8x23; +mod acosh_fp16x16; +mod acosh_fp8x23; +mod add_fp16x16; 
+mod add_fp16x16_broadcast; +mod add_fp8x23; +mod add_fp8x23_broadcast; +mod add_i32; +mod add_i32_broadcast; +mod add_i8; +mod add_i8_broadcast; +mod add_u32; +mod add_u32_broadcast; +mod argmin_fp16x16_1D_default; +mod argmin_fp16x16_1D_keepdims_false; +mod argmin_fp16x16_1D_last_index; +mod argmin_fp16x16_2D_default; +mod argmin_fp16x16_2D_keepdims_false; +mod argmin_fp16x16_2D_last_index; +mod argmin_fp16x16_3D_default; +mod argmin_fp16x16_3D_keepdims_false; +mod argmin_fp16x16_3D_last_index; +mod argmin_fp8x23_1D_default; +mod argmin_fp8x23_1D_keepdims_false; +mod argmin_fp8x23_1D_last_index; +mod argmin_fp8x23_2D_default; +mod argmin_fp8x23_2D_keepdims_false; +mod argmin_fp8x23_2D_last_index; +mod argmin_fp8x23_3D_default; +mod argmin_fp8x23_3D_keepdims_false; +mod argmin_fp8x23_3D_last_index; +mod argmin_i32_1D_default; +mod argmin_i32_1D_keepdims_false; +mod argmin_i32_1D_last_index; +mod argmin_i32_2D_default; +mod argmin_i32_2D_keepdims_false; +mod argmin_i32_2D_last_index; +mod argmin_i32_3D_default; +mod argmin_i32_3D_keepdims_false; +mod argmin_i32_3D_last_index; +mod argmin_i8_1D_default; +mod argmin_i8_1D_keepdims_false; +mod argmin_i8_1D_last_index; +mod argmin_i8_2D_default; +mod argmin_i8_2D_keepdims_false; +mod argmin_i8_2D_last_index; +mod argmin_i8_3D_default; +mod argmin_i8_3D_keepdims_false; +mod argmin_i8_3D_last_index; +mod argmin_u32_1D_default; +mod argmin_u32_1D_keepdims_false; +mod argmin_u32_1D_last_index; +mod argmin_u32_2D_default; +mod argmin_u32_2D_keepdims_false; +mod argmin_u32_2D_last_index; +mod argmin_u32_3D_default; +mod argmin_u32_3D_keepdims_false; +mod argmin_u32_3D_last_index; +mod asin_fp16x16; +mod asin_fp8x23; +mod asinh_fp16x16; +mod asinh_fp8x23; +mod atan_fp16x16; +mod atan_fp8x23; +mod ceil_fp16x16; +mod ceil_fp8x23; +mod concat_fp16x16_1d; +mod concat_fp16x16_2d; +mod concat_fp16x16_3d_default; +mod concat_fp16x16_3d_axis_1; +mod concat_fp16x16_3d_axis_2; +mod concat_fp16x16_3d_three_tensors_axis_1; +mod 
concat_fp16x16_3d_three_tensors_axis_2; +mod concat_fp8x23_1d; +mod concat_fp8x23_2d; +mod concat_fp8x23_3d_default; +mod concat_fp8x23_3d_axis_1; +mod concat_fp8x23_3d_axis_2; +mod concat_fp8x23_3d_three_tensors_axis_1; +mod concat_fp8x23_3d_three_tensors_axis_2; +mod concat_i32_1d; +mod concat_i32_2d; +mod concat_i32_3d_default; +mod concat_i32_3d_axis_1; +mod concat_i32_3d_axis_2; +mod concat_i32_3d_three_tensors_axis_1; +mod concat_i32_3d_three_tensors_axis_2; +mod concat_i8_1d; +mod concat_i8_2d; +mod concat_i8_3d_default; +mod concat_i8_3d_axis_1; +mod concat_i8_3d_axis_2; +mod concat_i8_3d_three_tensors_axis_1; +mod concat_i8_3d_three_tensors_axis_2; +mod concat_u32_1d; +mod concat_u32_2d; +mod concat_u32_3d_default; +mod concat_u32_3d_axis_1; +mod concat_u32_3d_axis_2; +mod concat_u32_3d_three_tensors_axis_1; +mod concat_u32_3d_three_tensors_axis_2; +mod cos_fp16x16; +mod cos_fp8x23; +mod cosh_fp16x16; +mod cosh_fp8x23; +mod cumsum_fp16x16_1d_default; +mod cumsum_fp16x16_1d_exclusive; +mod cumsum_fp16x16_1d_reverse; +mod cumsum_fp16x16_1d_reverse_exclusive; +mod cumsum_fp16x16_2d_axis_0; +mod cumsum_fp16x16_2d_axis_1; +mod cumsum_fp8x23_1d_default; +mod cumsum_fp8x23_1d_exclusive; +mod cumsum_fp8x23_1d_reverse; +mod cumsum_fp8x23_1d_reverse_exclusive; +mod cumsum_fp8x23_2d_axis_0; +mod cumsum_fp8x23_2d_axis_1; +mod cumsum_i32_1d_default; +mod cumsum_i32_1d_exclusive; +mod cumsum_i32_1d_reverse; +mod cumsum_i32_1d_reverse_exclusive; +mod cumsum_i32_2d_axis_0; +mod cumsum_i32_2d_axis_1; +mod cumsum_i8_1d_default; +mod cumsum_i8_1d_exclusive; +mod cumsum_i8_1d_reverse; +mod cumsum_i8_1d_reverse_exclusive; +mod cumsum_i8_2d_axis_0; +mod cumsum_i8_2d_axis_1; +mod cumsum_u32_1d_default; +mod cumsum_u32_1d_exclusive; +mod cumsum_u32_1d_reverse; +mod cumsum_u32_1d_reverse_exclusive; +mod cumsum_u32_2d_axis_0; +mod cumsum_u32_2d_axis_1; +mod div_fp16x16; +mod div_fp16x16_broadcast; +mod div_fp8x23; +mod div_fp8x23_broadcast; +mod div_i32; +mod div_i32_broadcast; 
+mod div_i8; +mod div_i8_broadcast; +mod div_u32; +mod div_u32_broadcast; +mod equal_fp16x16; +mod equal_fp16x16_broadcast; +mod equal_fp8x23; +mod equal_fp8x23_broadcast; +mod equal_i32; +mod equal_i32_broadcast; +mod equal_i8; +mod equal_i8_broadcast; +mod equal_u32; +mod equal_u32_broadcast; +mod exp_fp16x16; +mod exp_fp8x23; +mod less_equal_fp16x16; +mod less_equal_fp16x16_broadcast; +mod less_equal_fp8x23; +mod less_equal_fp8x23_broadcast; +mod less_equal_i32; +mod less_equal_i32_broadcast; +mod less_equal_i8; +mod less_equal_i8_broadcast; +mod less_equal_u32; +mod less_equal_u32_broadcast; +mod greater_fp16x16; +mod greater_fp16x16_broadcast; +mod greater_fp8x23; +mod greater_fp8x23_broadcast; +mod greater_i32; +mod greater_i32_broadcast; +mod greater_i8; +mod greater_i8_broadcast; +mod greater_u32; +mod greater_u32_broadcast; +mod leaky_relu_fp16x16; +mod leaky_relu_fp8x23; +mod linear_fp16x16; +mod linear_fp8x23; +mod linear_i32; +mod linear_i8; +mod linear_u32; +mod log_fp16x16; +mod log_fp8x23; +mod logsoftmax_fp16x16_axis_0; +mod logsoftmax_fp16x16_axis_1; +mod logsoftmax_fp8x23_axis_0; +mod logsoftmax_fp8x23_axis_1; +mod matmul_fp16x16_1d; +mod matmul_fp16x16_2x2; +mod matmul_fp16x16_2x1; +mod matmul_fp16x16_1x2; +mod matmul_fp8x23_1d; +mod matmul_fp8x23_2x2; +mod matmul_fp8x23_2x1; +mod matmul_fp8x23_1x2; +mod matmul_i32_1d; +mod matmul_i32_2x2; +mod matmul_i32_2x1; +mod matmul_i32_1x2; +mod matmul_i8_1d; +mod matmul_i8_2x2; +mod matmul_i8_2x1; +mod matmul_i8_1x2; +mod matmul_u32_1d; +mod matmul_u32_2x2; +mod matmul_u32_2x1; +mod matmul_u32_1x2; +mod mul_fp16x16; +mod mul_fp16x16_broadcast; +mod mul_fp8x23; +mod mul_fp8x23_broadcast; +mod mul_i32; +mod mul_i32_broadcast; +mod mul_i8; +mod mul_i8_broadcast; +mod mul_u32; +mod mul_u32_broadcast; +mod or_fp16x16; +mod or_fp16x16_broadcast; +mod or_fp8x23; +mod or_fp8x23_broadcast; +mod or_i32; +mod or_i32_broadcast; +mod or_i8; +mod or_i8_broadcast; +mod or_u32; +mod or_u32_broadcast; +mod relu_fp16x16; 
+mod relu_fp8x23; +mod relu_i32; +mod relu_i8; +mod sigmoid_fp16x16; +mod sigmoid_fp8x23; +mod sin_fp16x16; +mod sin_fp8x23; +mod sinh_fp16x16; +mod sinh_fp8x23; +mod softplus_fp8x23; +mod softplus_fp16x16; +mod softsign_fp8x23; +mod softsign_fp16x16; +mod sqrt_fp16x16; +mod sqrt_fp8x23; +mod sub_fp16x16; +mod sub_fp16x16_broadcast; +mod sub_fp8x23; +mod sub_fp8x23_broadcast; +mod sub_i32; +mod sub_i32_broadcast; +mod sub_i8; +mod sub_i8_broadcast; +mod sub_u32; +mod sub_u32_broadcast; +mod tanh_fp16x16; +mod tanh_fp8x23; +mod transpose_fp16x16_2d; +mod transpose_fp16x16_3d; +mod transpose_fp8x23_2d; +mod transpose_fp8x23_3d; +mod transpose_i32_2d; +mod transpose_i32_3d; +mod transpose_i8_2d; +mod transpose_i8_3d; +mod transpose_u32_2d; +mod transpose_u32_3d; +mod xor_fp16x16; +mod xor_fp16x16_broadcast; +mod xor_fp8x23; +mod xor_fp8x23_broadcast; +mod xor_i32; +mod xor_i32_broadcast; +mod xor_i8; +mod xor_i8_broadcast; +mod xor_u32; +mod xor_u32_broadcast; +mod greater_equal_fp16x16; +mod greater_equal_fp16x16_broadcast; +mod greater_equal_fp8x23; +mod greater_equal_fp8x23_broadcast; +mod greater_equal_i32; +mod greater_equal_i32_broadcast; +mod greater_equal_i8; +mod greater_equal_i8_broadcast; +mod greater_equal_u32; +mod greater_equal_u32_broadcast; +mod slice_fp16x16_2d; +mod slice_fp16x16_3d; +mod slice_fp8x23_2d; +mod slice_fp8x23_3d; +mod slice_i32_2d; +mod slice_i32_3d; +mod slice_i8_2d; +mod slice_i8_3d; +mod slice_u32_2d; +mod slice_u32_3d; +mod nonzero_fp16x16_2d; +mod nonzero_fp16x16_3d; +mod nonzero_fp8x23_2d; +mod nonzero_fp8x23_3d; +mod nonzero_i32_2d; +mod nonzero_i32_3d; +mod nonzero_i8_2d; +mod nonzero_i8_3d; +mod nonzero_u32_2d; +mod nonzero_u32_3d; +mod squeeze_fP16x16; +mod squeeze_fP8x23; +mod squeeze_i32; +mod squeeze_i8; +mod squeeze_u32; +mod unsqueeze_fp16x16_2d; +mod unsqueeze_fp16x16_3d; +mod unsqueeze_fp8x23_2d; +mod unsqueeze_fp8x23_3d; +mod unsqueeze_i32_2d; +mod unsqueeze_i32_3d; +mod unsqueeze_i8_2d; +mod unsqueeze_i8_3d; +mod 
unsqueeze_u32_2d; +mod unsqueeze_u32_3d; +mod sign_fP16x16; +mod sign_fP8x23; +mod sign_fail; +mod sign_i32; +mod sign_i8; +mod clip_fp16x16_2d; +mod clip_fp16x16_3d; +mod clip_fp8x23_2d; +mod clip_fp8x23_3d; +mod clip_i32_2d; +mod clip_i32_3d; +mod clip_i8_2d; +mod clip_i8_3d; +mod clip_u32_2d; +mod clip_u32_3d; +mod identity_fP16x16; +mod identity_fP8x23; +mod identity_i32; +mod identity_i8; +mod identity_u32; +mod thresholded_relu_fp16x16; +mod thresholded_relu_fp8x23; +mod hard_sigmoid_fp8x23; +mod hard_sigmoid_fp16x16; +mod neg_fp16x16; +mod neg_fp8x23; +mod neg_i32; +mod neg_i8; +mod gemm_all_attributes; +mod gemm_alpha; +mod gemm_beta; +mod gemm_default_matrix_bias; +mod gemm_default_vector_bias; +mod gemm_default_no_bias; +mod gemm_transposeA; +mod gemm_transposeB; +mod min_fp16x16_three_tensors; +mod min_fp16x16_broadcast_three_tensors; +mod min_fp16x16_two_tensors; +mod min_fp16x16_broadcast_two_tensors; +mod min_fp8x23_three_tensors; +mod min_fp8x23_broadcast_three_tensors; +mod min_fp8x23_two_tensors; +mod min_fp8x23_broadcast_two_tensors; +mod min_i32_three_tensors; +mod min_i32_broadcast_three_tensors; +mod min_i32_two_tensors; +mod min_i32_broadcast_two_tensors; +mod min_i8_three_tensors; +mod min_i8_broadcast_three_tensors; +mod min_i8_two_tensors; +mod min_i8_broadcast_two_tensors; +mod min_u32_three_tensors; +mod min_u32_broadcast_three_tensors; +mod min_u32_two_tensors; +mod min_u32_broadcast_two_tensors; +mod where_fp16x16; +mod where_fp16x16_broadcast; +mod where_fp8x23; +mod where_fp8x23_broadcast; +mod where_i32; +mod where_i32_broadcast; +mod where_i8; +mod where_i8_broadcast; +mod where_u32; +mod where_u32_broadcast; +mod not_bool; +mod round_fp16x16; +mod round_fp8x23; +mod max_fp16x16_three_tensors; +mod max_fp16x16_broadcast_three_tensors; +mod max_fp16x16_two_tensors; +mod max_fp16x16_broadcast_two_tensors; +mod max_fp8x23_three_tensors; +mod max_fp8x23_broadcast_three_tensors; +mod max_fp8x23_two_tensors; +mod 
max_fp8x23_broadcast_two_tensors; +mod max_i32_three_tensors; +mod max_i32_broadcast_three_tensors; +mod max_i32_two_tensors; +mod max_i32_broadcast_two_tensors; +mod max_i8_three_tensors; +mod max_i8_broadcast_three_tensors; +mod max_i8_two_tensors; +mod max_i8_broadcast_two_tensors; +mod max_u32_three_tensors; +mod max_u32_broadcast_three_tensors; +mod max_u32_two_tensors; +mod max_u32_broadcast_two_tensors; +mod scatter_fp16x16_3d_default; +mod scatter_fp16x16_3d_axis1; +mod scatter_fp16x16_3d_axis1_add; +mod scatter_fp8x23_default; +mod scatter_fp8x23_axis1; +mod scatter_fp8x23_mul; +mod scatter_i8_default; +mod scatter_i8_axis1; +mod scatter_i8_axis1_max; +mod scatter_u32_default; +mod scatter_u32_axis1; +mod scatter_u32_add; +mod array_feature_extractor_1D_i32; +mod array_feature_extractor_1D_fp8x23; +mod array_feature_extractor_1D_fp16x16; +mod array_feature_extractor_2D_i32; +mod array_feature_extractor_2D_fp8x23; +mod array_feature_extractor_2D_fp16x16; +mod array_feature_extractor_3D_i32; +mod array_feature_extractor_3D_fp8x23; +mod array_feature_extractor_3D_fp16x16; +mod binarizer_fp16x16; +mod binarizer_fp8x23; +mod tril_fp16x16; +mod tril_fp16x16_neg; +mod tril_fp16x16_one_row; +mod tril_fp16x16_out_neg; +mod tril_fp16x16_out_pos; +mod tril_fp16x16_pos; +mod tril_fp16x16_square; +mod tril_fp16x16_square_neg; +mod tril_fp16x16_zero; +mod triu_fp16x16; +mod triu_fp16x16_neg; +mod triu_fp16x16_one_row; +mod triu_fp16x16_out_neg; +mod triu_fp16x16_out_pos; +mod triu_fp16x16_pos; +mod triu_fp16x16_square; +mod triu_fp16x16_square_neg; +mod triu_fp16x16_zero; +mod tril_fp8x23; +mod tril_fp8x23_neg; +mod tril_fp8x23_one_row; +mod tril_fp8x23_out_neg; +mod tril_fp8x23_out_pos; +mod tril_fp8x23_pos; +mod tril_fp8x23_square; +mod tril_fp8x23_square_neg; +mod tril_fp8x23_zero; +mod triu_fp8x23; +mod triu_fp8x23_neg; +mod triu_fp8x23_one_row; +mod triu_fp8x23_out_neg; +mod triu_fp8x23_out_pos; +mod triu_fp8x23_pos; +mod triu_fp8x23_square; +mod 
triu_fp8x23_square_neg; +mod triu_fp8x23_zero; +mod tril_i32; +mod tril_neg_i32; +mod tril_i32_one_row; +mod tril_i32_out_neg; +mod tril_i32_out_pos; +mod tril_i32_pos; +mod tril_i32_square; +mod tril_i32_square_neg; +mod tril_i32_zero; +mod triu_i32; +mod triu_i32_neg; +mod triu_i32_one_row; +mod triu_i32_out_neg; +mod triu_i32_out_pos; +mod triu_i32_pos; +mod triu_i32_square; +mod triu_i32_square_neg; +mod triu_i32_zero; +mod tril_i8; +mod tril_i8_neg; +mod tril_i8_one_row; +mod tril_i8_out_neg; +mod tril_i8_out_pos; +mod tril_i8_pos; +mod tril_i8_square; +mod tril_i8_square_neg; +mod tril_i8_zero; +mod triu_i8; +mod triu_i8_neg; +mod triu_i8_one_row; +mod triu_i8_out_neg; +mod triu_i8_out_pos; +mod triu_i8_pos; +mod triu_i8_square; +mod triu_i8_square_neg; +mod triu_i8_zero; +mod tril_u32; +mod tril_u32_neg; +mod tril_u32_one_row; +mod tril_u32_out_neg; +mod tril_u32_out_pos; +mod tril_u32_pos; +mod tril_u32_square; +mod tril_u32_square_neg; +mod tril_u32_zero; +mod triu_u32; +mod triu_u32_neg; +mod triu_u32_one_row; +mod triu_u32_out_neg; +mod triu_u32_out_pos; +mod triu_u32_pos; +mod triu_u32_square; +mod triu_u32_square_neg; +mod triu_u32_zero; +mod reduce_sum_square_fp16x16_export_do_not_keepdims; +mod reduce_sum_square_fp16x16_export_keepdims; +mod reduce_sum_square_fp16x16_export_negative_axes_keepdims; +mod reduce_sum_square_fp8x23_export_do_not_keepdims; +mod reduce_sum_square_fp8x23_export_keepdims; +mod reduce_sum_square_fp8x23_export_negative_axes_keepdims; +mod reduce_sum_square_i32_export_do_not_keepdims; +mod reduce_sum_square_i32_export_keepdims; +mod reduce_sum_square_i32_export_negative_axes_keepdims; +mod reduce_sum_square_i8_export_do_not_keepdims; +mod reduce_sum_square_i8_export_keepdims; +mod reduce_sum_square_i8_export_negative_axes_keepdims; +mod reduce_sum_square_u32_export_do_not_keepdims; +mod reduce_sum_square_u32_export_keepdims; +mod reduce_sum_square_u32_export_negative_axes_keepdims; +mod reduce_l2_fp16x16_export_do_not_keepdims; 
+mod reduce_l2_fp16x16_export_keepdims; +mod reduce_l2_fp16x16_export_negative_axes_keepdims; +mod reduce_l2_fp8x23_export_do_not_keepdims; +mod reduce_l2_fp8x23_export_keepdims; +mod reduce_l2_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_fp16x16_export_do_not_keepdims; +mod reduce_l1_fp16x16_export_keepdims; +mod reduce_l1_fp16x16_export_negative_axes_keepdims; +mod reduce_l1_fp8x23_export_do_not_keepdims; +mod reduce_l1_fp8x23_export_keepdims; +mod reduce_l1_fp8x23_export_negative_axes_keepdims; +mod reduce_l1_i32_export_do_not_keepdims; +mod reduce_l1_i32_export_keepdims; +mod reduce_l1_i32_export_negative_axes_keepdims; +mod reduce_l1_i8_export_do_not_keepdims; +mod reduce_l1_i8_export_keepdims; +mod reduce_l1_i8_export_negative_axes_keepdims; +mod reduce_l1_u32_export_do_not_keepdims; +mod reduce_l1_u32_export_keepdims; +mod reduce_l1_u32_export_negative_axes_keepdims; +mod reduce_prod_fp16x16_1D; +mod reduce_prod_fp16x16_2D_default; +mod reduce_prod_fp16x16_2D_keepdims; +mod reduce_prod_fp16x16_2D_axis_1; +mod reduce_prod_fp8x23_1D; +mod reduce_prod_fp8x23_2D_default; +mod reduce_prod_fp8x23_2D_keepdims; +mod reduce_prod_fp8x23_2D_axis_1; +mod reduce_prod_i32_1D; +mod reduce_prod_i32_2D_default; +mod reduce_prod_i32_2D_keepdims; +mod reduce_prod_i32_2D_axis_1; +mod reduce_prod_i8_1D; +mod reduce_prod_i8_2D_default; +mod reduce_prod_i8_2D_keepdims; +mod reduce_prod_i8_2D_axis_1; +mod reduce_prod_u32_1D; +mod reduce_prod_u32_2D_default; +mod reduce_prod_u32_2D_keepdims; +mod reduce_prod_u32_2D_axis_1; +mod sequence_length_fp16x16; +mod sequence_length_fp16x16_broadcast; +mod sequence_length_fp8x23; +mod sequence_length_fp8x23_broadcast; +mod sequence_length_i32; +mod sequence_length_i32_broadcast; +mod sequence_length_i8; +mod sequence_length_i8_broadcast; +mod sequence_length_u32; +mod sequence_length_u32_broadcast; +mod sequence_at_u32_positive; +mod sequence_at_u32_negative; +mod sequence_at_fp16x16_positive; +mod sequence_at_fp16x16_negative; +mod 
sequence_at_fp8x23_positive; +mod sequence_at_fp8x23_negative; +mod sequence_at_i32_positive; +mod sequence_at_i32_negative; +mod sequence_at_i8_positive; +mod sequence_at_i8_negative; +mod reduce_min_fp16x16_1D; +mod reduce_min_fp16x16_2D_default; +mod reduce_min_fp16x16_2D_keepdims; +mod reduce_min_fp16x16_2D_axis_1; +mod reduce_min_fp8x23_1D; +mod reduce_min_fp8x23_2D_default; +mod reduce_min_fp8x23_2D_keepdims; +mod reduce_min_fp8x23_2D_axis_1; +mod reduce_min_i32_1D; +mod reduce_min_i32_2D_default; +mod reduce_min_i32_2D_keepdims; +mod reduce_min_i32_2D_axis_1; +mod reduce_min_i8_1D; +mod reduce_min_i8_2D_default; +mod reduce_min_i8_2D_keepdims; +mod reduce_min_i8_2D_axis_1; +mod reduce_min_u32_1D; +mod reduce_min_u32_2D_default; +mod reduce_min_u32_2D_keepdims; +mod reduce_min_u32_2D_axis_1; +mod sequence_construct_fp16x16; +mod sequence_construct_fp8x23; +mod sequence_construct_i32; +mod sequence_construct_i8; +mod sequence_construct_u32; +mod shrink_hard_fp16x16; +mod shrink_soft_fp16x16; +mod shrink_hard_fp8x23; +mod shrink_soft_fp8x23; +mod sequence_empty_fp16x16; +mod sequence_empty_fp8x23; +mod sequence_empty_i32; +mod sequence_empty_i8; +mod sequence_empty_u32; +mod reduce_mean_fp16x16_1D; +mod reduce_mean_fp16x16_2D_default; +mod reduce_mean_fp16x16_2D_keepdims; +mod reduce_mean_fp16x16_2D_axis_1; +mod reduce_mean_fp8x23_1D; +mod reduce_mean_fp8x23_2D_default; +mod reduce_mean_fp8x23_2D_keepdims; +mod reduce_mean_fp8x23_2D_axis_1; +mod reduce_mean_i32_1D; +mod reduce_mean_i32_2D_default; +mod reduce_mean_i32_2D_keepdims; +mod reduce_mean_i32_2D_axis_1; +mod reduce_mean_i8_1D; +mod reduce_mean_i8_2D_default; +mod reduce_mean_i8_2D_keepdims; +mod reduce_mean_i8_2D_axis_1; +mod reduce_mean_u32_1D; +mod reduce_mean_u32_2D_default; +mod reduce_mean_u32_2D_keepdims; +mod reduce_mean_u32_2D_axis_1; +mod pow_fp16x16; +mod pow_fp16x16_broadcast; +mod pow_fp8x23; +mod pow_fp8x23_broadcast; +mod sequence_erase_u32_positive; +mod sequence_erase_u32_negative; +mod 
sequence_erase_u32_empty; +mod sequence_erase_fp16x16_positive; +mod sequence_erase_fp16x16_negative; +mod sequence_erase_fp16x16_empty; +mod sequence_erase_fp8x23_positive; +mod sequence_erase_fp8x23_negative; +mod sequence_erase_fp8x23_empty; +mod sequence_erase_i32_positive; +mod sequence_erase_i32_negative; +mod sequence_erase_i32_empty; +mod sequence_erase_i8_positive; +mod sequence_erase_i8_negative; +mod sequence_erase_i8_empty; +mod sequence_insert_fp16x16; +mod sequence_insert_fp8x23; +mod sequence_insert_i32; +mod sequence_insert_i8; +mod sequence_insert_u32; +mod concat_from_sequence_fp8x23_new_axis_zero; +mod concat_from_sequence_fp8x23_new_axis_one; +mod concat_from_sequence_fp8x23_new_axis_default; +mod concat_from_sequence_fp16x16_new_axis_zero; +mod concat_from_sequence_fp16x16_new_axis_one; +mod concat_from_sequence_fp16x16_new_axis_default; +mod concat_from_sequence_i32_new_axis_zero; +mod concat_from_sequence_i32_new_axis_one; +mod concat_from_sequence_i32_new_axis_default; +mod concat_from_sequence_i8_new_axis_zero; +mod concat_from_sequence_i8_new_axis_one; +mod concat_from_sequence_i8_new_axis_default; +mod concat_from_sequence_u32_new_axis_zero; +mod concat_from_sequence_u32_new_axis_one; +mod concat_from_sequence_u32_new_axis_default; +mod is_nan_fp16x16; +mod is_nan_fp8x23; +mod is_inf_fp16x16; +mod is_inf_fp8x23; +mod is_inf_i32; +mod is_inf_i8; +mod is_inf_u32; +mod is_pos_inf_fp16x16; +mod is_neg_inf_fp16x16; +mod is_pos_inf_fp8x23; +mod is_neg_inf_fp8x23; +mod is_pos_inf_i32; +mod is_neg_inf_i32; +mod is_pos_inf_i8; +mod is_neg_inf_i8; +mod reduce_log_sum_fp8x23_export_do_not_keepdims; +mod reduce_log_sum_fp8x23_export_keepdims; +mod reduce_log_sum_fp8x23_export_negative_axes_keepdims; +mod reduce_log_sum_fp16x16_export_do_not_keepdims; +mod reduce_log_sum_fp16x16_export_keepdims; +mod reduce_log_sum_fp16x16_export_negative_axes_keepdims; +mod and_bool; +mod erf_fp16x16; +mod erf_fp8x23; +mod unique_fp16x16_without_axis_sorted; +mod 
unique_fp16x16_with_axis_zero_sorted; +mod unique_u32_without_axis_sorted; +mod unique_u32_without_axis_not_sorted; +mod unique_u32_with_axis_zero_sorted; +mod unique_u32_with_axis_zero_not_sorted; +mod unique_u32_with_axis_one_sorted; +mod unique_u32_with_axis_one_not_sorted; +mod gather_nd_fp16x16_3d_default; +mod gather_nd_fp16x16_3d_batch_dims1; +mod gather_nd_fp16x16_3d_batch_dims2; +mod gather_nd_fp8x23_3d_default; +mod gather_nd_fp8x23_3d_batch_dims1; +mod gather_nd_fp8x23_3d_batch_dims2; +mod gather_nd_i32_3d_default; +mod gather_nd_i32_3d_batch_dims1; +mod gather_nd_i32_3d_batch_dims2; +mod gather_nd_i8_3d_default; +mod gather_nd_i8_3d_batch_dims1; +mod gather_nd_u32_default; +mod gather_nd_u32_batch_dims1; +mod gather_nd_u32_batch_dims2; +mod resize_upsample_scales_nearest; +mod resize_downsample_scales_cubic; +mod resize_downsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_downsample_scales_cubic_align_corners; +mod resize_upsample_scales_linear; +mod resize_downsample_scales_linear_align_corners; +mod resize_downsample_scales_nearest; +mod resize_upsample_scales_cubic; +mod resize_upsample_scales_cubic_A_n0p5_exclude_outside; +mod resize_upsample_scales_cubic_align_corners; +mod resize_upsample_scales_cubic_asymmetric; +mod resize_upsample_scales_linear_align_corners; +mod resize_upsample_sizes_nearest; +mod resize_upsample_sizes_cubic; +mod resize_downsample_sizes_cubic; +mod resize_downsample_sizes_nearest; +mod resize_upsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_scales_cubic_antialias; +mod resize_downsample_scales_linear_antialias; +mod resize_downsample_sizes_cubic_antialias; +mod resize_downsample_sizes_linear_pytorch_half_pixel; +mod resize_tf_crop_and_resize; +mod resize_tf_crop_and_resize_extrapolation_value; +mod resize_upsample_scales_nearest_axes_2_3; +mod resize_upsample_scales_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_axes_2_3; +mod resize_upsample_sizes_nearest_ceil_half_pixel; +mod 
resize_upsample_sizes_nearest_floor_align_corners; +mod resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric; +mod resize_downsample_scales_linear_half_pixel_symmetric; +mod resize_downsample_sizes_nearest_not_larger; +mod resize_downsample_sizes_nearest_not_smaller; +mod resize_tf_crop_and_resize_axes_2_3; +mod resize_tf_crop_and_resize_axes_3_2; +mod resize_upsample_sizes_nearest_axes_3_2; +mod resize_upsample_sizes_nearest_not_larger; +mod resize_upsample_sizes_nearest_not_smaller; +mod compress_fp16x16_3d_default; +mod compress_fp16x16_3d_axis1; +mod compress_fp16x16_3d_axis2; +mod compress_fp16x16_3d_axis3; +mod compress_fp16x16_3d_noaxis; +mod compress_fp8x23_3d_default; +mod compress_fp8x23_3d_axis1; +mod compress_fp8x23_3d_axis2; +mod compress_i32_3d_default; +mod compress_i32_3d_axis1; +mod compress_i32_3d_axis2; +mod compress_i8_3d_default; +mod compress_i8_3d_axis1; +mod compress_i8_3d_axis2; +mod compress_u32_3d_default; +mod compress_u32_3d_axis1; +mod compress_u32_3d_axis2; +mod compress_u32_3d_axis2_2; +mod compress_u32_3d_axis3; +mod reduce_log_sum_exp_fp32x32_export_do_not_keepdims; +mod reduce_log_sum_exp_fp32x32_export_keepdims; +mod reduce_log_sum_exp_fp32x32_export_negative_axes_keepdims; +mod layer_normalization_default_axis; +mod layer_normalization_4d_axis0; +mod layer_normalization_4d_axis1; +mod layer_normalization_4d_axis2; +mod layer_normalization_4d_axis3; +mod layer_normalization_3d_axis0_epsilon; +mod layer_normalization_3d_axis_negative_3_epsilon; +mod layer_normalization_3d_axis1_epsilon; +mod layer_normalization_3d_axis2_epsilon; +mod layer_normalization_4d_axis_negative_4; +mod layer_normalization_4d_axis_negative_3; +mod layer_normalization_4d_axis_negative_2; +mod layer_normalization_4d_axis_negative_1; +mod layer_normalization_3d_axis_negative_2_epsilon; +mod layer_normalization_3d_axis_negative_1_epsilon; +mod layer_normalization_test; +mod split_u32_1d_equal_parts; +mod split_u32_2d_equal_parts; +mod 
split_u32_zero_size; +mod split_u32_1d_variable_parts; +mod split_u32_2d_variable_parts; +mod split_u32_1d_uneven; +mod split_u32_2d_uneven; +mod split_fp16x16_1d_equal_parts; +mod split_fp16x16_1d_variable_parts; +mod split_fp16x16_2d_equal_parts; +mod split_fp16x16_2d_variable_parts; +mod split_fp16x16_zero_size; +mod split_fp16x16_1d_uneven; +mod split_fp16x16_2d_uneven; +mod grid_sample; +mod grid_sample_cubic; +mod grid_sample_aligncorners; +mod grid_sample_nearest; +mod grid_sample_nearest_aligncorner; +mod grid_sample_padding_border; +mod grid_sample_padding_reflection; +mod grid_sample_padding_zeros; +mod col2im; +mod col2im_5D; +mod col2im_dilations; +mod col2im_pads; +mod col2im_strides; +mod random_uniform_like_fp16x16; +mod random_uniform_like_fp8x23; +mod range_fp8x23; +mod range_fp16x16; +mod range_i32; +mod range_i8; +mod range_u32; +mod hann_window_fp8x23; +mod hann_window_fp16x16; +mod hamming_window_fp16x16; +mod hamming_window_fp8x23; +mod blackman_window_fp16x16; +mod blackman_window_fp8x23; +mod split_to_sequence_fp16x16_1d_equal_parts; +mod split_to_sequence_fp16x16_1d_variable_parts; +mod split_to_sequence_fp16x16_2d_equal_parts; +mod split_to_sequence_fp16x16_2d_variable_parts; +mod split_to_sequence_fp16x16_zero_size; +mod split_to_sequence_fp16x16_1d_uneven; +mod split_to_sequence_fp16x16_2d_uneven; +mod split_to_sequence_u32_1d_equal_parts; +mod split_to_sequence_u32_1d_variable_parts; +mod split_to_sequence_u32_2d_equal_parts; +mod split_to_sequence_u32_2d_variable_parts; +mod split_to_sequence_u32_zero_size; +mod split_to_sequence_u32_1d_uneven; +mod split_to_sequence_u32_2d_uneven; +mod split_to_sequence_2d_scalar; +mod split_to_sequence_2d_nokeepdims; +mod split_to_sequence_1d_nokeepdims; +mod reverse_sequence_fp16x16_batch_equal_parts; +mod reverse_sequence_fp16x16_time_equal_parts; +mod reverse_sequence_i32_batch_equal_parts; +mod reverse_sequence_i32_time_equal_parts; +mod reverse_sequence_i8_batch_equal_parts; +mod 
reverse_sequence_i8_time_equal_parts; +mod reverse_sequence_u32_4x4_batch; +mod reverse_sequence_u32_4x4_time; +mod reverse_sequence_u32_3x3_batch; +mod reverse_sequence_u32_3x3_time; +mod reverse_sequence_different_dimensions_4_5; +mod reverse_sequence_different_dimensions_2_4; +mod reverse_sequence_different_dimensions_1_6; +mod reverse_sequence_different_dimensions_3x9_batch; +mod reverse_sequence_different_dimensions_3x9_time; +mod conv_transpose; +mod conv_transpose_1d; +mod conv_transpose_3d; +mod conv_transpose_attributes; +mod conv_transpose_autopad_same; +mod conv_transpose_dilations; +mod conv_transpose_pads; +mod conv_transpose_group_2; +mod conv_transpose_group_2_image_3; +mod depth_to_space_fp16x16; +mod depth_to_space_fp8x23; +mod depth_to_space_i32; +mod depth_to_space_i8; +mod depth_to_space_u32; +mod space_to_depth_fp16x16; +mod space_to_depth_fp8x23; +mod space_to_depth_i32; +mod space_to_depth_i8; +mod space_to_depth_u32; +mod scatter_nd_fp16x16_3d_default; +mod scatter_nd_fp16x16_3d_add; +mod scatter_nd_fp16x16_3d_mul; +mod scatter_nd_fp16x16_3d_max; +mod scatter_nd_fp16x16_3d_min; +mod scatter_nd_fp8x23_3d_default; +mod scatter_nd_fp8x23_3d_add; +mod scatter_nd_fp8x23_3d_mul; +mod scatter_nd_fp8x23_3d_max; +mod scatter_nd_fp8x23_3d_min; +mod scatter_nd_u32_default; +mod scatter_nd_u32_add; +mod scatter_nd_u32_mul; +mod scatter_nd_u32_max; +mod scatter_nd_u32_min; +mod conv_2D_with_padding; +mod conv_1D_no_padding; +mod conv_1D_with_padding; +mod conv_3D_no_padding; +mod conv_3D_with_padding; +mod conv_4D_no_padding; +mod conv_2D_with_2_groups; +mod conv_2D_with_autopad_same; +mod conv_2D_with_strides_asymmetric_padding; +mod conv_2D_with_strides_with_padding; +mod conv_4D_with_padding; +mod label_encoder_fp16x16_3d_default; +mod label_encoder_fp8x23_default; +mod label_encoder_i8_default; +mod label_encoder_i32_default; +mod label_encoder_u32_default; +mod gather_fp16x16_3d_default; +mod gather_fp16x16_3d_axis1; +mod gather_fp16x16_3d_axis2; 
+mod gather_negative_indices; +mod gather_negative_axis; mod less_fp16x16; mod less_fp16x16_broadcast; mod less_fp8x23; @@ -955,33 +955,33 @@ mod less_i8; mod less_i8_broadcast; mod less_u32; mod less_u32_broadcast; -// mod reshape_extended_dims; -// mod reshape_negative_dim; -// mod reshape_negative_extended_dims; -// mod reshape_one_dim; -// mod reshape_reduced_dims; -// mod reshape_reordered_all_dims; -// mod reshape_reordered_last_dims; -// mod reshape_zero_and_negative_dim; -// mod reshape_zero_dim; -// mod reduce_sum_default_axes_keepdims; -// mod reduce_sum_empty_axes_input_noop; -// mod reduce_sum_keep_dims; -// mod reduce_sum_negative_axes_keepdims; -// mod reduce_sum_no_keep_dims; -// mod gather_elements_default; -// mod gather_elements_axis1; -// mod gather_elements_axis2; -// mod gather_elements_negative_indices; -// mod softmax_axis_0; -// mod softmax_axis_1; -// mod softmax_axis_2; -// mod softmax_axis_minus_1; -// mod argmax_default_axes_keepdims; -// mod argmax_default_axes_keepdims_select_last_index; -// mod argmax_keepdims; -// mod argmax_keepdims_select_last_index; -// mod argmax_negative_axis_keepdims; -// mod argmax_negative_axis_keepdims_select_last_index; -// mod argmax_no_keepdims; -// mod argmax_no_keepdims_select_last_index; +mod reshape_extended_dims; +mod reshape_negative_dim; +mod reshape_negative_extended_dims; +mod reshape_one_dim; +mod reshape_reduced_dims; +mod reshape_reordered_all_dims; +mod reshape_reordered_last_dims; +mod reshape_zero_and_negative_dim; +mod reshape_zero_dim; +mod reduce_sum_default_axes_keepdims; +mod reduce_sum_empty_axes_input_noop; +mod reduce_sum_keep_dims; +mod reduce_sum_negative_axes_keepdims; +mod reduce_sum_no_keep_dims; +mod gather_elements_default; +mod gather_elements_axis1; +mod gather_elements_axis2; +mod gather_elements_negative_indices; +mod softmax_axis_0; +mod softmax_axis_1; +mod softmax_axis_2; +mod softmax_axis_minus_1; +mod argmax_default_axes_keepdims; +mod 
argmax_default_axes_keepdims_select_last_index; +mod argmax_keepdims; +mod argmax_keepdims_select_last_index; +mod argmax_negative_axis_keepdims; +mod argmax_negative_axis_keepdims_select_last_index; +mod argmax_no_keepdims; +mod argmax_no_keepdims_select_last_index; diff --git a/tests/nodes/less_equal_fp16x16.cairo b/tests/nodes/less_equal_fp16x16.cairo index 19fe42d2d..d08953d94 100644 --- a/tests/nodes/less_equal_fp16x16.cairo +++ b/tests/nodes/less_equal_fp16x16.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp16x16() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp16x16/input_0.cairo b/tests/nodes/less_equal_fp16x16/input_0.cairo index ec0027d22..7d6981164 100644 --- a/tests/nodes/less_equal_fp16x16/input_0.cairo +++ b/tests/nodes/less_equal_fp16x16/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use 
orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16/input_1.cairo b/tests/nodes/less_equal_fp16x16/input_1.cairo index 5cc407dc5..1adb39195 100644 --- a/tests/nodes/less_equal_fp16x16/input_1.cairo +++ b/tests/nodes/less_equal_fp16x16/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -9,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 131072, sign: false }); - data.append(FP16x16 { mag: 131072, sign: false }); data.append(FP16x16 { mag: 131072, sign: true }); - data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 131072, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16/output_0.cairo b/tests/nodes/less_equal_fp16x16/output_0.cairo index 7e2cee38d..b066124bb 100644 --- a/tests/nodes/less_equal_fp16x16/output_0.cairo +++ b/tests/nodes/less_equal_fp16x16/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(0); + data.append(1); data.append(1); data.append(1); - data.append(0); - data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16_broadcast.cairo b/tests/nodes/less_equal_fp16x16_broadcast.cairo index 6ca29eb78..ef67204ff 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::FP16x16TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::operators::tensor::FP16x16TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp16x16_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo b/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo index 13261de0d..35099fc12 100644 --- 
a/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(FP16x16 { mag: 65536, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 131072, sign: true }); + data.append(FP16x16 { mag: 65536, sign: false }); data.append(FP16x16 { mag: 65536, sign: false }); - data.append(FP16x16 { mag: 65536, sign: true }); - data.append(FP16x16 { mag: 196608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo b/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo index 25a595400..7e68b68a9 100644 --- a/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; use orion::numbers::{FixedTrait, FP16x16}; fn input_1() -> Tensor { @@ -10,6 +10,6 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP16x16 { mag: 0, sign: false }); - data.append(FP16x16 { mag: 196608, sign: true }); + data.append(FP16x16 { mag: 65536, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo b/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo index 31cf673d4..abcbdb1de 100644 --- 
a/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_fp16x16_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); - data.append(1); data.append(1); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23.cairo b/tests/nodes/less_equal_fp8x23.cairo index 3ee472dce..0c18c9338 100644 --- a/tests/nodes/less_equal_fp8x23.cairo +++ b/tests/nodes/less_equal_fp8x23.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; -use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp8x23() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp8x23/input_0.cairo b/tests/nodes/less_equal_fp8x23/input_0.cairo index cac7e356e..110d6b273 
100644 --- a/tests/nodes/less_equal_fp8x23/input_0.cairo +++ b/tests/nodes/less_equal_fp8x23/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23/input_1.cairo b/tests/nodes/less_equal_fp8x23/input_1.cairo index 6a5e5a086..8804a2fb7 100644 --- a/tests/nodes/less_equal_fp8x23/input_1.cairo +++ b/tests/nodes/less_equal_fp8x23/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -11,7 +11,7 @@ fn input_1() -> Tensor { let mut data = ArrayTrait::new(); data.append(FP8x23 { mag: 0, sign: false }); data.append(FP8x23 { mag: 8388608, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); + data.append(FP8x23 { mag: 16777216, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23/output_0.cairo b/tests/nodes/less_equal_fp8x23/output_0.cairo index 07948a48e..0367c57b6 100644 --- 
a/tests/nodes/less_equal_fp8x23/output_0.cairo +++ b/tests/nodes/less_equal_fp8x23/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); - data.append(0); + data.append(1); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast.cairo b/tests/nodes/less_equal_fp8x23_broadcast.cairo index 8cf36a6ba..12b528d3d 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; -use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; use orion::operators::tensor::FP8x23TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_fp8x23_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo 
b/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo index 597e948e1..4ab421020 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/input_0.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_0() -> Tensor { @@ -9,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 8388608, sign: false }); data.append(FP8x23 { mag: 0, sign: false }); - data.append(FP8x23 { mag: 16777216, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); - data.append(FP8x23 { mag: 25165824, sign: true }); + data.append(FP8x23 { mag: 0, sign: false }); + data.append(FP8x23 { mag: 16777216, sign: false }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo b/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo index 6a7c55548..95b4fb754 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/input_1.cairo @@ -1,6 +1,6 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv}; +use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorAdd}; use orion::numbers::{FixedTrait, FP8x23}; fn input_1() -> Tensor { @@ -9,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(FP8x23 { mag: 25165824, sign: true }); data.append(FP8x23 { mag: 16777216, sign: false }); - data.append(FP8x23 { mag: 8388608, sign: true }); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo 
b/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo index 62010885f..5614176ce 100644 --- a/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_fp8x23_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); + data.append(0); data.append(1); - data.append(1); - data.append(1); + data.append(0); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32.cairo b/tests/nodes/less_equal_i32.cairo index 3072a59b0..474974374 100644 --- a/tests/nodes/less_equal_i32.cairo +++ b/tests/nodes/less_equal_i32.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_i32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i32/input_0.cairo b/tests/nodes/less_equal_i32/input_0.cairo index 11c8e73ff..cae2bd7f1 
100644 --- a/tests/nodes/less_equal_i32/input_0.cairo +++ b/tests/nodes/less_equal_i32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); + data.append(2); data.append(-3); - data.append(-2); - data.append(1); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32/input_1.cairo b/tests/nodes/less_equal_i32/input_1.cairo index 330426cd7..219f31bdc 100644 --- a/tests/nodes/less_equal_i32/input_1.cairo +++ b/tests/nodes/less_equal_i32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); + data.append(-2); data.append(2); - data.append(2); - data.append(-3); - data.append(1); + data.append(-2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32/output_0.cairo b/tests/nodes/less_equal_i32/output_0.cairo index 8442d0d0c..0367c57b6 100644 --- a/tests/nodes/less_equal_i32/output_0.cairo +++ b/tests/nodes/less_equal_i32/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() 
-> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); @@ -10,7 +11,7 @@ fn output_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(1); data.append(1); - data.append(0); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast.cairo b/tests/nodes/less_equal_i32_broadcast.cairo index 3657b38d9..7b498fc0d 100644 --- a/tests/nodes/less_equal_i32_broadcast.cairo +++ b/tests/nodes/less_equal_i32_broadcast.cairo @@ -3,22 +3,20 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; -use orion::operators::tensor::I32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_i32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i32_broadcast/input_0.cairo b/tests/nodes/less_equal_i32_broadcast/input_0.cairo index 9f1d44f37..e6f5a6f14 100644 --- a/tests/nodes/less_equal_i32_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_i32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = 
ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-2); - data.append(1); - data.append(-3); - data.append(-2); + data.append(2); + data.append(2); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast/input_1.cairo b/tests/nodes/less_equal_i32_broadcast/input_1.cairo index dc3c54f94..be5599aac 100644 --- a/tests/nodes/less_equal_i32_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_i32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I32Tensor, I32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(-2); - data.append(-1); + data.append(2); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i32_broadcast/output_0.cairo b/tests/nodes/less_equal_i32_broadcast/output_0.cairo index 31cf673d4..085034f13 100644 --- a/tests/nodes/less_equal_i32_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_i32_broadcast/output_0.cairo @@ -1,8 +1,9 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); diff --git a/tests/nodes/less_equal_i8.cairo b/tests/nodes/less_equal_i8.cairo index c86a70ec1..6594b7bd1 100644 --- a/tests/nodes/less_equal_i8.cairo +++ b/tests/nodes/less_equal_i8.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use 
orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_i8() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i8/input_0.cairo b/tests/nodes/less_equal_i8/input_0.cairo index 4f53a978e..ec1b63b85 100644 --- a/tests/nodes/less_equal_i8/input_0.cairo +++ b/tests/nodes/less_equal_i8/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,7 +10,7 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(-3); - data.append(-1); + data.append(2); data.append(-1); data.append(-2); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i8/input_1.cairo b/tests/nodes/less_equal_i8/input_1.cairo index 6cb982144..7389f1082 100644 --- a/tests/nodes/less_equal_i8/input_1.cairo +++ b/tests/nodes/less_equal_i8/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(-2); - data.append(1); + data.append(2); + data.append(-1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8/output_0.cairo b/tests/nodes/less_equal_i8/output_0.cairo index 31cf673d4..0367c57b6 100644 --- a/tests/nodes/less_equal_i8/output_0.cairo +++ b/tests/nodes/less_equal_i8/output_0.cairo @@ -1,15 +1,16 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - data.append(0); + data.append(1); data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) diff --git a/tests/nodes/less_equal_i8_broadcast.cairo b/tests/nodes/less_equal_i8_broadcast.cairo index ac53e3aa6..070cd2f57 100644 --- a/tests/nodes/less_equal_i8_broadcast.cairo +++ b/tests/nodes/less_equal_i8_broadcast.cairo @@ -3,22 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::I8TensorPartialEq; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::I32TensorPartialEq; use orion::utils::{assert_eq, assert_seq_eq}; -use orion::operators::tensor::{I8Tensor, 
I8TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I8TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_i8_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_i8_broadcast/input_0.cairo b/tests/nodes/less_equal_i8_broadcast/input_0.cairo index 835e66354..f0422b9f4 100644 --- a/tests/nodes/less_equal_i8_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_i8_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(2); - data.append(2); - data.append(2); - data.append(-3); + data.append(-1); + data.append(0); + data.append(0); + data.append(-1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast/input_1.cairo b/tests/nodes/less_equal_i8_broadcast/input_1.cairo index 02ff8facd..03f8b18c6 100644 --- a/tests/nodes/less_equal_i8_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_i8_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{I8Tensor, I8TensorDiv}; +use orion::operators::tensor::{I8Tensor, I8TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { 
shape.append(2); let mut data = ArrayTrait::new(); - data.append(-3); - data.append(-3); + data.append(2); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_i8_broadcast/output_0.cairo b/tests/nodes/less_equal_i8_broadcast/output_0.cairo index 9a2391c78..0367c57b6 100644 --- a/tests/nodes/less_equal_i8_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_i8_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(0); - data.append(0); + data.append(1); + data.append(1); + data.append(1); data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32.cairo b/tests/nodes/less_equal_u32.cairo index 8a1e7aab4..c9ec171ca 100644 --- a/tests/nodes/less_equal_u32.cairo +++ b/tests/nodes/less_equal_u32.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_u32() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = 
input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_u32/input_0.cairo b/tests/nodes/less_equal_u32/input_0.cairo index 84b61d7cc..4f912fb5d 100644 --- a/tests/nodes/less_equal_u32/input_0.cairo +++ b/tests/nodes/less_equal_u32/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_0() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); + data.append(0); data.append(2); - data.append(5); - data.append(5); + data.append(0); + data.append(0); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32/input_1.cairo b/tests/nodes/less_equal_u32/input_1.cairo index fe6539464..05bf5f2e6 100644 --- a/tests/nodes/less_equal_u32/input_1.cairo +++ b/tests/nodes/less_equal_u32/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,9 +9,9 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); - data.append(4); + data.append(2); data.append(5); data.append(3); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32/output_0.cairo b/tests/nodes/less_equal_u32/output_0.cairo index de313d890..0367c57b6 100644 --- a/tests/nodes/less_equal_u32/output_0.cairo +++ b/tests/nodes/less_equal_u32/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, 
SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); - data.append(0); data.append(1); data.append(1); - data.append(0); + data.append(1); + data.append(1); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast.cairo b/tests/nodes/less_equal_u32_broadcast.cairo index dc695687d..b30f5cd1e 100644 --- a/tests/nodes/less_equal_u32_broadcast.cairo +++ b/tests/nodes/less_equal_u32_broadcast.cairo @@ -3,20 +3,22 @@ mod input_1; mod output_0; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; -use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::U32TensorPartialEq; use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::operators::tensor::I32TensorPartialEq; +use orion::operators::tensor::U32TensorPartialEq; +use orion::operators::tensor::{TensorTrait, Tensor}; #[test] #[available_gas(2000000000)] fn test_less_equal_u32_broadcast() { let input_0 = input_0::input_0(); let input_1 = input_1::input_1(); - let z = output_0::output_0(); + let z_0 = output_0::output_0(); - let y = input_0.less_equal(@input_1); + let y_0 = input_0.less_equal(@input_1); - assert_eq(y, z); + assert_eq(y_0, z_0); } diff --git a/tests/nodes/less_equal_u32_broadcast/input_0.cairo b/tests/nodes/less_equal_u32_broadcast/input_0.cairo index a6bf00a7c..2d8435a2f 100644 --- a/tests/nodes/less_equal_u32_broadcast/input_0.cairo +++ b/tests/nodes/less_equal_u32_broadcast/input_0.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use 
orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_0() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -9,8 +10,8 @@ fn input_0() -> Tensor { let mut data = ArrayTrait::new(); data.append(0); - data.append(5); - data.append(4); data.append(0); + data.append(3); + data.append(3); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast/input_1.cairo b/tests/nodes/less_equal_u32_broadcast/input_1.cairo index 8e7328b81..7ee1bbea4 100644 --- a/tests/nodes/less_equal_u32_broadcast/input_1.cairo +++ b/tests/nodes/less_equal_u32_broadcast/input_1.cairo @@ -1,6 +1,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{U32Tensor, U32TensorAdd}; +use orion::numbers::NumberTrait; fn input_1() -> Tensor { let mut shape = ArrayTrait::::new(); @@ -8,7 +9,7 @@ fn input_1() -> Tensor { shape.append(2); let mut data = ArrayTrait::new(); - data.append(1); - data.append(1); + data.append(3); + data.append(2); TensorTrait::new(shape.span(), data.span()) } diff --git a/tests/nodes/less_equal_u32_broadcast/output_0.cairo b/tests/nodes/less_equal_u32_broadcast/output_0.cairo index ef770fa07..897d076d9 100644 --- a/tests/nodes/less_equal_u32_broadcast/output_0.cairo +++ b/tests/nodes/less_equal_u32_broadcast/output_0.cairo @@ -1,16 +1,17 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor}; -use orion::operators::tensor::{U32Tensor, U32TensorDiv}; +use orion::operators::tensor::{I32Tensor, I32TensorAdd}; +use orion::numbers::NumberTrait; -fn output_0() -> Tensor { +fn output_0() -> Tensor { let mut shape = ArrayTrait::::new(); shape.append(2); shape.append(2); let mut data = ArrayTrait::new(); data.append(1); - 
data.append(0); - data.append(0); data.append(1); + data.append(1); + data.append(0); TensorTrait::new(shape.span(), data.span()) } From 0db954f09cbbbae5d830eefe28c0238705d6c40c Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 25 Mar 2024 10:10:00 +0100 Subject: [PATCH 61/68] update doc --- docs/framework/operators/tensor/tensor.less_equal.md | 8 ++++---- src/operators/tensor/core.cairo | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/framework/operators/tensor/tensor.less_equal.md b/docs/framework/operators/tensor/tensor.less_equal.md index c440b39c6..8e7943f91 100644 --- a/docs/framework/operators/tensor/tensor.less_equal.md +++ b/docs/framework/operators/tensor/tensor.less_equal.md @@ -1,7 +1,7 @@ #tensor.less_equal ```rust - fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; ``` Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor. @@ -20,7 +20,7 @@ The input tensors must have either: ## Returns -A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. +A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
## Examples @@ -31,7 +31,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_equal_example() -> Tensor { +fn less_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); @@ -53,7 +53,7 @@ use core::array::{ArrayTrait, SpanTrait}; use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; -fn less_equal_example() -> Tensor { +fn less_equal_example() -> Tensor { let tensor_1 = TensorTrait::::new( shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), ); diff --git a/src/operators/tensor/core.cairo b/src/operators/tensor/core.cairo index f604bb82f..02f9cc6e4 100644 --- a/src/operators/tensor/core.cairo +++ b/src/operators/tensor/core.cairo @@ -1321,7 +1321,7 @@ trait TensorTrait { /// #tensor.less_equal /// /// ```rust - /// fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; + /// fn less_equal(self: @Tensor, other: @Tensor) -> Tensor; /// ``` /// /// Check if each element of the first tensor is less than or equal to the corresponding element of the second tensor. @@ -1340,7 +1340,7 @@ trait TensorTrait { /// /// ## Returns /// - /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. + /// A new `Tensor` of booleans (0 or 1) with the same shape as the broadcasted inputs. 
/// /// ## Examples /// @@ -1351,7 +1351,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_equal_example() -> Tensor { + /// fn less_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); @@ -1373,7 +1373,7 @@ trait TensorTrait { /// /// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor}; /// - /// fn less_equal_example() -> Tensor { + /// fn less_equal_example() -> Tensor { /// let tensor_1 = TensorTrait::::new( /// shape: array![3, 3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(), /// ); From 9c62277084487c05a7e99872cf70108c4cd41354 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Wed, 27 Mar 2024 09:25:58 +0100 Subject: [PATCH 62/68] bump orion version --- Scarb.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Scarb.toml b/Scarb.toml index f05fa6649..091b7de0e 100644 --- a/Scarb.toml +++ b/Scarb.toml @@ -1,6 +1,6 @@ [package] name = "orion" -version = "0.2.4" +version = "0.2.5" cairo-version = "2.5.3" edition = "2023_10" description = "ONNX Runtime in Cairo for verifiable ML inference using STARK" From 57871a4c9eb4a14dde2bfc03eefa88adf0e6b298 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Wed, 27 Mar 2024 17:22:54 +0100 Subject: [PATCH 63/68] fix gather_elements --- .../tensor/math/gather_elements.cairo | 94 +++++++------------ 1 file changed, 35 insertions(+), 59 deletions(-) diff --git a/src/operators/tensor/math/gather_elements.cairo b/src/operators/tensor/math/gather_elements.cairo index cc8b9ae20..e4b624e42 100644 --- a/src/operators/tensor/math/gather_elements.cairo +++ b/src/operators/tensor/math/gather_elements.cairo @@ -1,7 +1,9 @@ +use core::option::OptionTrait; +use core::traits::TryInto; use alexandria_data_structures::array_ext::SpanTraitExt; use orion::numbers::NumberTrait; -use orion::operators::tensor::{TensorTrait, Tensor}; +use 
orion::operators::tensor::{{TensorTrait, Tensor}, core::{unravel_index, stride}}; /// Cf: TensorTrait::gather_elements docstring fn gather_elements, impl TCopy: Copy, impl TDrop: Drop,>( @@ -19,71 +21,45 @@ fn gather_elements, impl TCopy: Copy, im }; assert(axis < (*self.shape).len(), 'axis out of dimensions'); - let axis_shape = *(*self.shape).at(axis); - - // Adjust indices that are negative - let mut adjusted_indices = array![]; - let mut indices_data = indices.data.clone(); - loop { - match indices_data.pop_front() { - Option::Some(index) => { - let adjusted_index: usize = if *index < 0 { - let val: u32 = (axis_shape.try_into().unwrap() + *index).try_into().unwrap(); - val - } else { - let val: u32 = (*index).try_into().unwrap(); - val - }; - assert(adjusted_index >= 0 && adjusted_index < axis_shape, 'Index out of bounds'); - adjusted_indices.append(adjusted_index); - }, - Option::None => { break; } - }; - }; + let data_strides = stride(*self.shape); let mut output_data = array![]; - let mut data_shape_clone = (*self.shape).clone(); - let mut multiplier = 1; - let mut looper = 1; - let mut ind = 0; - loop { - match data_shape_clone.pop_front() { - Option::Some(val) => { - if ind >= axis { - multiplier *= *val; - } - if ind > axis { - looper *= *val; - } - ind += 1; - }, - Option::None => { break; } - }; - }; + let mut i: usize = 0; + while i < indices + .data + .len() { + let indice = *indices.data.at(i); + let adjusted_indice: u32 = if indice < 0 { + ((*(*self.shape).at(axis)).try_into().unwrap() + indice).try_into().unwrap() + } else { + indice.try_into().unwrap() + }; - let inner_loop = multiplier / axis_shape; - let mut adjusted_indices_iter = adjusted_indices.clone(); + assert(adjusted_indice < (*(*self.shape).at(axis)), 'Index out of bounds'); - let mut i: usize = 0; - loop { - match adjusted_indices_iter.pop_front() { - Option::Some(indice) => { - let value = if axis == 0 { - indice * inner_loop + (i % inner_loop) - } else if axis == (*self.shape).len() 
- 1 { - indice + axis_shape * (i / axis_shape) - } else { - indice * looper - + (i % looper) - + (multiplier / axis_shape) * (i / (multiplier / axis_shape)) + let multidim_index = unravel_index(i, indices.shape); + let mut flat_index_for_data = 0; + + let mut j: usize = 0; + while j < multidim_index + .len() { + let dim_index = *multidim_index.at(j); + if j == axis { + flat_index_for_data += adjusted_indice * (*data_strides.at(j)); + } else { + flat_index_for_data += (dim_index * *data_strides.at(j)) + } + j += 1; }; - output_data.append(*self.data[value]); - i += 1; - }, - Option::None => { break; } + assert( + flat_index_for_data < (*self.data).len().try_into().unwrap(), + 'Flat index out of bounds' + ); + + output_data.append(*(*self.data).at(flat_index_for_data)); + i += 1; }; - }; TensorTrait::::new(indices.shape, output_data.span()) } From ad74d400650de1c3ca4245f7085700360a9153db Mon Sep 17 00:00:00 2001 From: chachaleo Date: Thu, 28 Mar 2024 20:40:39 +0100 Subject: [PATCH 64/68] feat: tree ensemble --- docgen/src/main.rs | 8 + .../machine-learning/tree-ensemble/README.md | 22 + .../tree-ensemble/tree_ensemble.predict.md | 139 ++++ src/operators/ml.cairo | 3 + src/operators/ml/tree_ensemble.cairo | 1 + .../ml/tree_ensemble/tree_ensemble.cairo | 602 ++++++++++++++++++ tests/lib.cairo | 1 + tests/ml.cairo | 1 + tests/ml/tree_ensemble_test.cairo | 300 +++++++++ 9 files changed, 1077 insertions(+) create mode 100644 docs/framework/operators/machine-learning/tree-ensemble/README.md create mode 100644 docs/framework/operators/machine-learning/tree-ensemble/tree_ensemble.predict.md create mode 100644 src/operators/ml/tree_ensemble/tree_ensemble.cairo create mode 100644 tests/ml/tree_ensemble_test.cairo diff --git a/docgen/src/main.rs b/docgen/src/main.rs index 8d1f90f4b..fb7dba8f1 100644 --- a/docgen/src/main.rs +++ b/docgen/src/main.rs @@ -59,6 +59,14 @@ fn main() { doc_trait(trait_path, doc_path, label); doc_functions(trait_path, doc_path, trait_name, label); + // 
TREE ENSEMBLE DOC + let trait_path = "src/operators/ml/tree_ensemble/tree_ensemble.cairo"; + let doc_path = "docs/framework/operators/machine-learning/tree-ensemble"; + let label = "tree_ensemble"; + let trait_name: &str = "TreeEnsembleTrait"; + doc_trait(trait_path, doc_path, label); + doc_functions(trait_path, doc_path, trait_name, label); + // LINEAR REGRESSOR DOC let trait_path = "src/operators/ml/linear/linear_regressor.cairo"; let doc_path = "docs/framework/operators/machine-learning/linear-regressor"; diff --git a/docs/framework/operators/machine-learning/tree-ensemble/README.md b/docs/framework/operators/machine-learning/tree-ensemble/README.md new file mode 100644 index 000000000..26fcfb205 --- /dev/null +++ b/docs/framework/operators/machine-learning/tree-ensemble/README.md @@ -0,0 +1,22 @@ +# Tree Ensemble + +`TreeEnsembleTrait` provides a trait definition for tree ensemble problem. + +```rust +use orion::operators::ml::TreeEnsembleTrait; +``` + +### Data types + +Orion supports currently only fixed point data types for `TreeEnsembleTrait`. + +| Data type | dtype | +| -------------------- | ------------------------------------------------------------- | +| Fixed point (signed) | `TreeEnsembleTrait` | + + +*** + +| function | description | +| --- | --- | +| [`tree_ensemble.predict`](tree_ensemble.predict.md) | Returns the regressed values for each input in a batch. 
| \ No newline at end of file diff --git a/docs/framework/operators/machine-learning/tree-ensemble/tree_ensemble.predict.md b/docs/framework/operators/machine-learning/tree-ensemble/tree_ensemble.predict.md new file mode 100644 index 000000000..a7f97e96d --- /dev/null +++ b/docs/framework/operators/machine-learning/tree-ensemble/tree_ensemble.predict.md @@ -0,0 +1,139 @@ +# TreeEnsemble::predict + +```rust + fn predict(X: @Tensor, + nodes_splits: Tensor, + nodes_featureids: Span, + nodes_modes: Span, + nodes_truenodeids: Span, + nodes_falsenodeids: Span, + nodes_trueleafs: Span, + nodes_falseleafs: Span, + leaf_targetids: Span, + leaf_weights: Tensor, + tree_roots: Span, + post_transform: POST_TRANSFORM, + aggregate_function: AGGREGATE_FUNCTION, + nodes_hitrates: Option>, + nodes_missing_value_tracks_true: Option>, + membership_values: Option>, + n_targets: usize + ) -> MutMatrix::; +``` + +Tree Ensemble operator. Returns the regressed values for each input in a batch. Inputs have dimensions [N, F] where N is the input batch size and F is the number of input features. Outputs have dimensions [N, num_targets] where N is the batch size and num_targets is the number of targets, which is a configurable attribute. + +## Args + +* `X`: Input 2D tensor. +* `nodes_splits`: Thresholds to do the splitting on for each node with mode that is not 'BRANCH_MEMBER'. +* `nodes_featureids`: Feature id for each node. +* `nodes_modes`: The comparison operation performed by the node. This is encoded as an enumeration of 'NODE_MODE::LEQ', 'NODE_MODE::LT', 'NODE_MODE::GTE', 'NODE_MODE::GT', 'NODE_MODE::EQ', 'NODE_MODE::NEQ', and 'NODE_MODE::MEMBER' +* `nodes_truenodeids`: If `nodes_trueleafs` is 0 (false) at an entry, this represents the position of the true branch node. +* `nodes_falsenodeids`: If `nodes_falseleafs` is 0 (false) at an entry, this represents the position of the false branch node. +* `nodes_trueleafs`: 1 if true branch is leaf for each node and 0 an interior node. 
+* `nodes_falseleafs`: 1 if false branch is leaf for each node and 0 an interior node. +* `leaf_targetids`: The index of the target that this leaf contributes to (this must be in range `[0, n_targets)`). +* `leaf_weights`: The weight for each leaf. +* `tree_roots`: Index into `nodes_*` for the root of each tree. The tree structure is derived from the branching of each node. +* `post_transform`: Indicates the transform to apply to the score. One of 'POST_TRANSFORM::NONE', 'POST_TRANSFORM::SOFTMAX', 'POST_TRANSFORM::LOGISTIC', 'POST_TRANSFORM::SOFTMAX_ZERO' or 'POST_TRANSFORM::PROBIT' , +* `aggregate_function`: Defines how to aggregate leaf values within a target. One of 'AGGREGATE_FUNCTION::AVERAGE', 'AGGREGATE_FUNCTION::SUM', 'AGGREGATE_FUNCTION::MIN', 'AGGREGATE_FUNCTION::MAX` defaults to 'AGGREGATE_FUNCTION::SUM' +* `nodes_hitrates`: Popularity of each node, used for performance and may be omitted. +* `nodes_missing_value_tracks_true`: For each node, define whether to follow the true branch (if attribute value is 1) or false branch (if attribute value is 0) in the presence of a NaN input feature. This attribute may be left undefined and the default value is false (0) for all nodes. +* `membership_values`: Members to test membership of for each set membership node. List all of the members to test against in the order that the 'BRANCH_MEMBER' mode appears in `node_modes`, delimited by `NaN`s. Will have the same number of sets of values as nodes with mode 'BRANCH_MEMBER'. This may be omitted if the node doesn't contain any 'BRANCH_MEMBER' nodes. +* `n_targets`: The total number of targets. 
+ + +## Returns + +* Output of shape [Batch Size, Number of targets] + +## Type Constraints + +`TreeEnsembleClassifier` and `X` must be fixed points + +## Examples + +```rust +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor}; +use orion::operators::ml::{TreeEnsembleTrait,POST_TRANSFORM, AGGREGATE_FUNCTION, NODE_MODE}; +use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; +use orion::numbers::NumberTrait; + +fn example_tree_ensemble_one_tree() -> MutMatrix:: { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 78643, sign: false }); + data.append(FP16x16 { mag: 222822, sign: false }); + data.append(FP16x16 { mag: 7864, sign: true }); + data.append(FP16x16 { mag: 108789, sign: false }); + data.append(FP16x16 { mag: 271319, sign: false }); + data.append(FP16x16 { mag: 115998, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 342753, sign: false }); + data.append(FP16x16 { mag: 794296, sign: false }); + data.append(FP16x16 { mag: 801505, sign: true }); + data.append(FP16x16 { mag: 472514, sign: false }); + let leaf_weights = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 205783, sign: false }); + data.append(FP16x16 { mag: 78643, sign: false }); + data.append(FP16x16 { mag: 275251, sign: false }); + let nodes_splits = TensorTrait::new(shape.span(), data.span()); + + let membership_values = Option::None; + + let n_targets = 2; + let aggregate_function = AGGREGATE_FUNCTION::SUM; + let nodes_missing_value_tracks_true = Option::None; + let nodes_hitrates = Option::None; + let post_transform = POST_TRANSFORM::NONE; + + let tree_roots: Span = 
array![0].span(); + let nodes_modes: Span = array![MODE::LEQ, MODE::LEQ, MODE::LEQ].span(); + + let nodes_featureids: Span = array![0, 0, 0].span(); + let nodes_truenodeids: Span = array![1, 0, 1].span(); + let nodes_trueleafs: Span = array![0, 1, 1].span(); + let nodes_falsenodeids: Span = array![2, 2, 3].span(); + let nodes_falseleafs: Span = array![0, 1, 1].span(); + let leaf_targetids: Span = array![0, 1, 0, 1].span(); + + return TreeEnsembleTrait::predict( + @X, + nodes_splits, + nodes_featureids, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + leaf_targetids, + leaf_weights, + tree_roots, + post_transform, + aggregate_function, + nodes_hitrates, + nodes_missing_value_tracks_true, + membership_values, + n_targets + ); +} + +>>> [[ 5.23 0. ] + [ 5.23 0. ] + [ 0. 12.12]] +``` diff --git a/src/operators/ml.cairo b/src/operators/ml.cairo index 08e9e40fb..93a0394a6 100644 --- a/src/operators/ml.cairo +++ b/src/operators/ml.cairo @@ -3,6 +3,8 @@ mod linear; mod svm; mod normalizer; +use orion::operators::ml::tree_ensemble::tree_ensemble::{TreeEnsembleTrait}; + use orion::operators::ml::tree_ensemble::core::{ TreeEnsemble, TreeEnsembleAttributes, TreeEnsembleImpl, NODE_MODES }; @@ -32,3 +34,4 @@ enum POST_TRANSFORM { SOFTMAXZERO, PROBIT, } + diff --git a/src/operators/ml/tree_ensemble.cairo b/src/operators/ml/tree_ensemble.cairo index 32c96c0bd..925c1ea7e 100644 --- a/src/operators/ml/tree_ensemble.cairo +++ b/src/operators/ml/tree_ensemble.cairo @@ -1,3 +1,4 @@ mod core; mod tree_ensemble_classifier; mod tree_ensemble_regressor; +mod tree_ensemble; diff --git a/src/operators/ml/tree_ensemble/tree_ensemble.cairo b/src/operators/ml/tree_ensemble/tree_ensemble.cairo new file mode 100644 index 000000000..51e3f9ec2 --- /dev/null +++ b/src/operators/ml/tree_ensemble/tree_ensemble.cairo @@ -0,0 +1,602 @@ +use orion::operators::tensor::{Tensor, TensorTrait}; +use orion::numbers::NumberTrait; + +use 
orion::operators::matrix::{MutMatrix, MutMatrixImpl, MutMatrixTrait}; + +#[derive(Copy, Drop)] +enum AGGREGATE_FUNCTION { + AVERAGE, + SUM, + MIN, + MAX, +} + +#[derive(Copy, Drop)] +enum POST_TRANSFORM { + NONE, + SOFTMAX, + LOGISTIC, + SOFTMAX_ZERO, + PROBIT, +} + +#[derive(Copy, Drop)] +enum NODE_MODE { + LEQ, + LT, + GTE, + GT, + EQ, + NEQ, + MEMBER, +} + +/// Trait +/// +/// predict - Returns the regressed values for each input in a batch. +trait TreeEnsembleTrait { + /// # TreeEnsemble::predict + /// + /// ```rust + /// fn predict(X: @Tensor, + /// nodes_splits: Tensor, + /// nodes_featureids: Span, + /// nodes_modes: Span, + /// nodes_truenodeids: Span, + /// nodes_falsenodeids: Span, + /// nodes_trueleafs: Span, + /// nodes_falseleafs: Span, + /// leaf_targetids: Span, + /// leaf_weights: Tensor, + /// tree_roots: Span, + /// post_transform: POST_TRANSFORM, + /// aggregate_function: AGGREGATE_FUNCTION, + /// nodes_hitrates: Option>, + /// nodes_missing_value_tracks_true: Option>, + /// membership_values: Option>, + /// n_targets: usize + /// ) -> MutMatrix::; + /// ``` + /// + /// Tree Ensemble operator. Returns the regressed values for each input in a batch. Inputs have dimensions [N, F] where N is the input batch size and F is the number of input features. Outputs have dimensions [N, num_targets] where N is the batch size and num_targets is the number of targets, which is a configurable attribute. + /// + /// ## Args + /// + /// * `X`: Input 2D tensor. + /// * `nodes_splits`: Thresholds to do the splitting on for each node with mode that is not 'BRANCH_MEMBER'. + /// * `nodes_featureids`: Feature id for each node. + /// * `nodes_modes`: The comparison operation performed by the node. 
This is encoded as an enumeration of 'NODE_MODE::LEQ', 'NODE_MODE::LT', 'NODE_MODE::GTE', 'NODE_MODE::GT', 'NODE_MODE::EQ', 'NODE_MODE::NEQ', and 'NODE_MODE::MEMBER' + /// * `nodes_truenodeids`: If `nodes_trueleafs` is 0 (false) at an entry, this represents the position of the true branch node. + /// * `nodes_falsenodeids`: If `nodes_falseleafs` is 0 (false) at an entry, this represents the position of the false branch node. + /// * `nodes_trueleafs`: 1 if true branch is leaf for each node and 0 an interior node. + /// * `nodes_falseleafs`: 1 if false branch is leaf for each node and 0 an interior node. + /// * `leaf_targetids`: The index of the target that this leaf contributes to (this must be in range `[0, n_targets)`). + /// * `leaf_weights`: The weight for each leaf. + /// * `tree_roots`: Index into `nodes_*` for the root of each tree. The tree structure is derived from the branching of each node. + /// * `post_transform`: Indicates the transform to apply to the score. One of 'POST_TRANSFORM::NONE', 'POST_TRANSFORM::SOFTMAX', 'POST_TRANSFORM::LOGISTIC', 'POST_TRANSFORM::SOFTMAX_ZERO' or 'POST_TRANSFORM::PROBIT' , + /// * `aggregate_function`: Defines how to aggregate leaf values within a target. One of 'AGGREGATE_FUNCTION::AVERAGE', 'AGGREGATE_FUNCTION::SUM', 'AGGREGATE_FUNCTION::MIN', 'AGGREGATE_FUNCTION::MAX` defaults to 'AGGREGATE_FUNCTION::SUM' + /// * `nodes_hitrates`: Popularity of each node, used for performance and may be omitted. + /// * `nodes_missing_value_tracks_true`: For each node, define whether to follow the true branch (if attribute value is 1) or false branch (if attribute value is 0) in the presence of a NaN input feature. This attribute may be left undefined and the default value is false (0) for all nodes. + /// * `membership_values`: Members to test membership of for each set membership node. List all of the members to test against in the order that the 'BRANCH_MEMBER' mode appears in `node_modes`, delimited by `NaN`s. 
Will have the same number of sets of values as nodes with mode 'BRANCH_MEMBER'. This may be omitted if the node doesn't contain any 'BRANCH_MEMBER' nodes. + /// * `n_targets`: The total number of targets. + + /// + /// ## Returns + /// + /// * Output of shape [Batch Size, Number of targets] + /// + /// ## Type Constraints + /// + /// `T` must be fixed point + /// + /// ## Examples + /// + /// ```rust + /// use orion::numbers::FP16x16; + /// use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor}; + /// use orion::operators::ml::{TreeEnsembleTrait,POST_TRANSFORM, AGGREGATE_FUNCTION, NODE_MODE}; + /// use orion::operators::matrix::{MutMatrix, MutMatrixImpl}; + /// use orion::numbers::NumberTrait; + /// + /// fn example_tree_ensemble_one_tree() -> MutMatrix:: { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(3); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 78643, sign: false }); + /// data.append(FP16x16 { mag: 222822, sign: false }); + /// data.append(FP16x16 { mag: 7864, sign: true }); + /// data.append(FP16x16 { mag: 108789, sign: false }); + /// data.append(FP16x16 { mag: 271319, sign: false }); + /// data.append(FP16x16 { mag: 115998, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(4); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 342753, sign: false }); + /// data.append(FP16x16 { mag: 794296, sign: false }); + /// data.append(FP16x16 { mag: 801505, sign: true }); + /// data.append(FP16x16 { mag: 472514, sign: false }); + /// let leaf_weights = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 205783, sign: false }); + /// data.append(FP16x16 { mag: 78643, sign: false }); + /// data.append(FP16x16 
{ mag: 275251, sign: false }); + /// let nodes_splits = TensorTrait::new(shape.span(), data.span()); + /// + /// let membership_values = Option::None; + /// + /// let n_targets = 2; + /// let aggregate_function = AGGREGATE_FUNCTION::SUM; + /// let nodes_missing_value_tracks_true = Option::None; + /// let nodes_hitrates = Option::None; + /// let post_transform = POST_TRANSFORM::NONE; + /// + /// let tree_roots: Span = array![0].span(); + /// let nodes_modes: Span = array![MODE::LEQ, MODE::LEQ, MODE::LEQ].span(); + /// + /// let nodes_featureids: Span = array![0, 0, 0].span(); + /// let nodes_truenodeids: Span = array![1, 0, 1].span(); + /// let nodes_trueleafs: Span = array![0, 1, 1].span(); + /// let nodes_falsenodeids: Span = array![2, 2, 3].span(); + /// let nodes_falseleafs: Span = array![0, 1, 1].span(); + /// let leaf_targetids: Span = array![0, 1, 0, 1].span(); + /// + /// return TreeEnsembleTrait::predict( + /// @X, + /// nodes_splits, + /// nodes_featureids, + /// nodes_modes, + /// nodes_truenodeids, + /// nodes_falsenodeids, + /// nodes_trueleafs, + /// nodes_falseleafs, + /// leaf_targetids, + /// leaf_weights, + /// tree_roots, + /// post_transform, + /// aggregate_function, + /// nodes_hitrates, + /// nodes_missing_value_tracks_true, + /// membership_values, + /// n_targets + /// ); + /// } + /// + /// >>> [[ 5.23 0. ] + /// [ 5.23 0. ] + /// [ 0. 
12.12]] + /// ``` + /// + fn predict( + X: @Tensor, + nodes_splits: Tensor, + nodes_featureids: Span, + nodes_modes: Span, + nodes_truenodeids: Span, + nodes_falsenodeids: Span, + nodes_trueleafs: Span, + nodes_falseleafs: Span, + leaf_targetids: Span, + leaf_weights: Tensor, + tree_roots: Span, + post_transform: POST_TRANSFORM, + aggregate_function: AGGREGATE_FUNCTION, + nodes_hitrates: Option>, + nodes_missing_value_tracks_true: Option>, + membership_values: Option>, + n_targets: usize + ) -> MutMatrix::; +} + + +impl TreeEnsembleImpl< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +PartialOrd, + +PartialEq, + +Add, + +Div, + +Mul, + +Into, + +AddEq, +> of TreeEnsembleTrait { + fn predict( + X: @Tensor, + nodes_splits: Tensor, + nodes_featureids: Span, + nodes_modes: Span, + nodes_truenodeids: Span, + nodes_falsenodeids: Span, + nodes_trueleafs: Span, + nodes_falseleafs: Span, + leaf_targetids: Span, + leaf_weights: Tensor, + tree_roots: Span, + post_transform: POST_TRANSFORM, + aggregate_function: AGGREGATE_FUNCTION, + nodes_hitrates: Option>, + nodes_missing_value_tracks_true: Option>, + membership_values: Option>, + n_targets: usize + ) -> MutMatrix:: { + let batch_size = *(*X).shape.at(0); + let n_features = *(*X).shape.at(1); + let n_trees = tree_roots.len(); + + let mut set_membership_iter = array![].span(); + let mut map_member_to_nodeid = Default::default(); + + let mut res: MutMatrix = MutMatrixImpl::new(batch_size, n_targets); + + let (nodes_missing_value_tracks_true, nodes_missing_value_tracks_true_flag) = + match nodes_missing_value_tracks_true { + Option::Some(nodes_missing_value_tracks_true) => { + (nodes_missing_value_tracks_true, true) + }, + Option::None => { (array![].span(), false) } + }; + + match membership_values { + Option::Some(membership_values) => { + set_membership_iter = membership_values.data.clone(); + + let mut tree_roots_iter = tree_roots.clone(); + loop { + match tree_roots_iter.pop_front() { + 
Option::Some(root_index) => { + let root_index = *root_index; + let is_leaf = (*nodes_trueleafs.at(root_index) == 1 + && *nodes_falseleafs.at(root_index) == 1 + && *nodes_truenodeids + .at(root_index) == *nodes_falsenodeids + .at(root_index)); + map_members_to_nodeids( + root_index, + is_leaf, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + ref set_membership_iter, + ref map_member_to_nodeid, + ); + }, + Option::None => { break; } + } + }; + }, + Option::None => {} + }; + + match aggregate_function { + AGGREGATE_FUNCTION::AVERAGE => { res.set(batch_size, n_targets, NumberTrait::zero()); }, + AGGREGATE_FUNCTION::SUM => { res.set(batch_size, n_targets, NumberTrait::zero()); }, + AGGREGATE_FUNCTION::MIN => { + let mut i = 0; + while i != batch_size { + let mut j = 0; + while j != n_targets { + res.set(i, j, NumberTrait::min_value()); + j += 1; + }; + i += 1; + }; + }, + AGGREGATE_FUNCTION::MAX => { + let mut i = 0; + while i != batch_size { + let mut j = 0; + while j != n_targets { + res.set(i, j, NumberTrait::max_value()); + j += 1; + }; + i += 1; + }; + }, + } + + let mut weights = ArrayTrait::new(); + let mut target_ids = ArrayTrait::new(); + + let mut tree_roots_iter = tree_roots.clone(); + loop { + match tree_roots_iter.pop_front() { + Option::Some(root_index) => { + let root_index = *root_index; + let is_leaf = (*nodes_trueleafs.at(root_index) == 1 + && *nodes_falseleafs.at(root_index) == 1 + && *nodes_truenodeids.at(root_index) == *nodes_falsenodeids.at(root_index)); + + let mut batch_num = 0; + while batch_num != batch_size { + let x_batch = SpanTrait::slice( + (*X).data, batch_num * n_features, n_features + ); + + let (weight, target) = iterate_node( + x_batch, + root_index, + is_leaf, + nodes_splits.data, + nodes_featureids, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + leaf_targetids, + leaf_weights, + nodes_hitrates, + nodes_missing_value_tracks_true, + 
nodes_missing_value_tracks_true_flag, + ref map_member_to_nodeid, + ); + weights.append(weight); + target_ids.append(target); + batch_num += 1; + }; + }, + Option::None => { break; } + } + }; + + let weights = weights.span(); + let target_ids = target_ids.span(); + + let mut batch_num = 0; + while batch_num != batch_size { + match aggregate_function { + AGGREGATE_FUNCTION::AVERAGE => { + let mut i = 0; + while i != n_trees { + let index = i * batch_size + batch_num; + res + .set( + batch_num, + *target_ids.at(index), + res.at(batch_num, *target_ids.at(index)) + + *weights.at(index) + / NumberTrait::new_unscaled(n_trees.into(), false) + ); + i += 1; + }; + }, + AGGREGATE_FUNCTION::SUM => { + let mut i = 0; + while i != n_trees { + let index = i * batch_size + batch_num; + res + .set( + batch_num, + *target_ids.at(index), + res.at(batch_num, *target_ids.at(index)) + *weights.at(index) + ); + i += 1; + }; + }, + AGGREGATE_FUNCTION::MIN => { + let mut i = 0; + while i != n_targets { + let val = NumberTrait::min( + res.at(batch_num, *target_ids.at(batch_num)), *weights.at(batch_num) + ); + res.set(batch_num, *target_ids.at(batch_num), val); + i += 1; + }; + }, + AGGREGATE_FUNCTION::MAX => { + let mut i = 0; + while i != n_targets { + let val = NumberTrait::max( + res.at(batch_num, *target_ids.at(batch_num)), *weights.at(batch_num) + ); + res.set(batch_num, *target_ids.at(batch_num), val); + i += 1; + }; + } + } + + batch_num += 1; + }; + + // Post Transform + let mut res = match post_transform { + POST_TRANSFORM::NONE => res, + POST_TRANSFORM::SOFTMAX => res.softmax(1), + POST_TRANSFORM::LOGISTIC => res.sigmoid(), + POST_TRANSFORM::SOFTMAX_ZERO => res.softmax_zero(1), + POST_TRANSFORM::PROBIT => core::panic_with_felt252('Probit not supported yet'), + }; + + return res; + } +} +fn iterate_node< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +PartialOrd, + +PartialEq, +>( + X: Span, + current_node_index: usize, + is_leaf: bool, + nodes_splits: Span, + 
nodes_featureids: Span, + nodes_modes: Span, + nodes_truenodeids: Span, + nodes_falsenodeids: Span, + nodes_trueleafs: Span, + nodes_falseleafs: Span, + leaf_targetids: Span, + leaf_weights: Tensor, + nodes_hitrates: Option>, + nodes_missing_value_tracks_true: Span, + nodes_missing_value_tracks_true_flag: bool, + ref map_member_to_nodeid: Felt252Dict>>, +) -> (T, usize) { + let mut current_node_index = current_node_index; + let mut is_leaf = is_leaf; + + while !is_leaf { + let nmvtt_flag = if nodes_missing_value_tracks_true_flag { + *nodes_missing_value_tracks_true.at(current_node_index) == 1 + } else { + nodes_missing_value_tracks_true_flag + }; + if compare( + *X.at(*nodes_featureids.at(current_node_index)), + current_node_index, + *nodes_splits.at(current_node_index), + *nodes_modes.at(current_node_index), + ref map_member_to_nodeid, + nmvtt_flag + ) { + is_leaf = *nodes_trueleafs.at(current_node_index) == 1; + current_node_index = *nodes_truenodeids.at(current_node_index); + } else { + is_leaf = *nodes_falseleafs.at(current_node_index) == 1; + current_node_index = *nodes_falsenodeids.at(current_node_index); + }; + }; + + return (*leaf_weights.data.at(current_node_index), *leaf_targetids.at(current_node_index)); +} + +fn map_members_to_nodeids< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +PartialOrd, + +PartialEq, +>( + current_node_index: usize, + is_leaf: bool, + nodes_modes: Span, + nodes_truenodeids: Span, + nodes_falsenodeids: Span, + nodes_trueleafs: Span, + nodes_falseleafs: Span, + ref set_membership_iter: Span, + ref map_member_to_nodeid: Felt252Dict>>, +) { + let mut current_node_index = current_node_index; + let mut is_leaf = is_leaf; + + if is_leaf { + return; + } + + match *nodes_modes.at(current_node_index) { + NODE_MODE::LEQ => {}, + NODE_MODE::LT => {}, + NODE_MODE::GTE => {}, + NODE_MODE::GT => {}, + NODE_MODE::EQ => {}, + NODE_MODE::NEQ => {}, + NODE_MODE::MEMBER => { + let mut subset_members = ArrayTrait::new(); + loop { + 
match set_membership_iter.pop_front() { + Option::Some(v) => { + if *v == NumberTrait::NaN() { + break; + } + subset_members.append(*v) + }, + Option::None => { break; } + } + }; + map_member_to_nodeid + .insert(current_node_index.into(), NullableTrait::new(subset_members.span())); + }, + } + // true branch + map_members_to_nodeids( + *nodes_truenodeids.at(current_node_index), + *nodes_trueleafs.at(current_node_index) == 1, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + ref set_membership_iter, + ref map_member_to_nodeid, + ); + + // false branch + map_members_to_nodeids( + *nodes_falsenodeids.at(current_node_index), + *nodes_falseleafs.at(current_node_index) == 1, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + ref set_membership_iter, + ref map_member_to_nodeid, + ); +} + + +fn compare< + T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +PartialOrd, +PartialEq +>( + x_feat: T, + current_node_index: usize, + value: T, + mode: NODE_MODE, + ref map_members_to_nodeids: Felt252Dict>>, + nodes_missing_value_tracks_true_flag: bool, +) -> bool { + match mode { + NODE_MODE::LEQ => { + (x_feat <= value && !x_feat.is_nan()) || nodes_missing_value_tracks_true_flag + }, + NODE_MODE::LT => { + (x_feat < value && !x_feat.is_nan()) || nodes_missing_value_tracks_true_flag + }, + NODE_MODE::GTE => { + (x_feat >= value && !x_feat.is_nan()) || nodes_missing_value_tracks_true_flag + }, + NODE_MODE::GT => { + (x_feat > value && !x_feat.is_nan()) || nodes_missing_value_tracks_true_flag + }, + NODE_MODE::EQ => { + (x_feat == value && !x_feat.is_nan()) || nodes_missing_value_tracks_true_flag + }, + NODE_MODE::NEQ => { + (x_feat != value && !x_feat.is_nan()) || nodes_missing_value_tracks_true_flag + }, + NODE_MODE::MEMBER => { + let mut set_members = map_members_to_nodeids.get(current_node_index.into()).deref(); + loop { + match set_members.pop_front() { + Option::Some(v) => { if x_feat 
== *v { + break true; + } }, + Option::None => { break false; } + } + } + }, + } +} diff --git a/tests/lib.cairo b/tests/lib.cairo index f5cecb77d..c408347ef 100644 --- a/tests/lib.cairo +++ b/tests/lib.cairo @@ -5,3 +5,4 @@ mod nodes; mod ml; mod operators; + diff --git a/tests/ml.cairo b/tests/ml.cairo index 4e3e0781e..b92dbcd83 100644 --- a/tests/ml.cairo +++ b/tests/ml.cairo @@ -5,3 +5,4 @@ mod linear_classifier_test; mod svm_regressor_test; mod svm_classifier_test; mod normalizer_test; +mod tree_ensemble_test; diff --git a/tests/ml/tree_ensemble_test.cairo b/tests/ml/tree_ensemble_test.cairo new file mode 100644 index 000000000..59a5592f6 --- /dev/null +++ b/tests/ml/tree_ensemble_test.cairo @@ -0,0 +1,300 @@ +use orion::numbers::FP16x16; +use orion::operators::tensor::{Tensor, TensorTrait, FP16x16Tensor, U32Tensor}; +use orion::operators::ml::tree_ensemble::tree_ensemble::{ + TreeEnsembleTrait, POST_TRANSFORM, AGGREGATE_FUNCTION, NODE_MODE +}; +use orion::operators::matrix::{MutMatrix, MutMatrixImpl, MutMatrixTrait}; +use orion::numbers::NumberTrait; + + +#[test] +#[available_gas(200000000000)] +fn export_tree_ensemble_two_trees() { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: true }); + data.append(FP16x16 { mag: 26214, sign: true }); + data.append(FP16x16 { mag: 19660, sign: true }); + data.append(FP16x16 { mag: 13107, sign: true }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 19660, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(6); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 5041, sign: false }); + data.append(FP16x16 { mag: 32768, sign: false }); + 
data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 18724, sign: false }); + data.append(FP16x16 { mag: 32768, sign: false }); + let leaf_weights = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 17462, sign: false }); + data.append(FP16x16 { mag: 40726, sign: false }); + data.append(FP16x16 { mag: 36652, sign: true }); + data.append(FP16x16 { mag: 47240, sign: true }); + let nodes_splits = TensorTrait::new(shape.span(), data.span()); + + let n_targets = 1; + let aggregate_function = AGGREGATE_FUNCTION::AVERAGE; + let nodes_missing_value_tracks_true = Option::None; + let nodes_hitrates = Option::None; + let post_transform = POST_TRANSFORM::NONE; + + let tree_roots: Span = array![0, 2].span(); + let nodes_modes: Span = array![ + NODE_MODE::LEQ, NODE_MODE::LEQ, NODE_MODE::LEQ, NODE_MODE::LEQ + ] + .span(); + + let nodes_featureids: Span = array![0, 2, 0, 0].span(); + let nodes_truenodeids: Span = array![1, 0, 3, 4].span(); + let nodes_trueleafs: Span = array![0, 1, 1, 1].span(); + let nodes_falsenodeids: Span = array![2, 1, 3, 5].span(); + let nodes_falseleafs: Span = array![1, 1, 0, 1].span(); + let leaf_targetids: Span = array![0, 0, 0, 0, 0, 0].span(); + + let mut scores = TreeEnsembleTrait::predict( + @X, + nodes_splits, + nodes_featureids, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + leaf_targetids, + leaf_weights, + tree_roots, + post_transform, + aggregate_function, + nodes_hitrates, + nodes_missing_value_tracks_true, + Option::None, + n_targets + ); + + // ASSERT SCOREs + assert(scores.at(0, 0) == FP16x16 { mag: 18904, sign: false }, 'scores.at(0, 0)'); + assert(scores.at(1, 0) == FP16x16 { mag: 18904, sign: false }, 'scores.at(1, 0)'); + assert(scores.at(2, 0) == FP16x16 { mag: 18904, sign: false }, 
'scores.at(2, 0)'); +} + + +#[test] +#[available_gas(200000000000)] +fn export_tree_ensemble_one_tree() { + let mut shape = ArrayTrait::::new(); + shape.append(3); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 78643, sign: false }); + data.append(FP16x16 { mag: 222822, sign: false }); + data.append(FP16x16 { mag: 7864, sign: true }); + data.append(FP16x16 { mag: 108789, sign: false }); + data.append(FP16x16 { mag: 271319, sign: false }); + data.append(FP16x16 { mag: 115998, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 342753, sign: false }); + data.append(FP16x16 { mag: 794296, sign: false }); + data.append(FP16x16 { mag: 801505, sign: true }); + data.append(FP16x16 { mag: 472514, sign: false }); + let leaf_weights = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 205783, sign: false }); + data.append(FP16x16 { mag: 78643, sign: false }); + data.append(FP16x16 { mag: 275251, sign: false }); + let nodes_splits = TensorTrait::new(shape.span(), data.span()); + + let membership_values = Option::None; + + let n_targets = 2; + let aggregate_function = AGGREGATE_FUNCTION::SUM; + let nodes_missing_value_tracks_true = Option::None; + let nodes_hitrates = Option::None; + let post_transform = POST_TRANSFORM::NONE; + + let tree_roots: Span = array![0].span(); + let nodes_modes: Span = array![NODE_MODE::LEQ, NODE_MODE::LEQ, NODE_MODE::LEQ] + .span(); + + let nodes_featureids: Span = array![0, 0, 0].span(); + let nodes_truenodeids: Span = array![1, 0, 1].span(); + let nodes_trueleafs: Span = array![0, 1, 1].span(); + let nodes_falsenodeids: Span = array![2, 2, 3].span(); + let nodes_falseleafs: Span = array![0, 1, 1].span(); + let leaf_targetids: Span = array![0, 
1, 0, 1].span(); + + let mut scores = TreeEnsembleTrait::predict( + @X, + nodes_splits, + nodes_featureids, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + leaf_targetids, + leaf_weights, + tree_roots, + post_transform, + aggregate_function, + nodes_hitrates, + nodes_missing_value_tracks_true, + membership_values, + n_targets + ); + + // ASSERT SCOREs + assert(scores.at(0, 0) == FP16x16 { mag: 342753, sign: false }, 'scores.at(0, 0)'); + assert(scores.at(0, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(0, 1)'); + + assert(scores.at(1, 0) == FP16x16 { mag: 342753, sign: false }, 'scores.at(1, 0)'); + assert(scores.at(1, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(1, 1)'); + + assert(scores.at(2, 0) == FP16x16 { mag: 0, sign: false }, 'scores.at(2, 0)'); + assert(scores.at(2, 1) == FP16x16 { mag: 794296, sign: false }, 'scores.at(2, 1)'); +} + + +#[test] +#[available_gas(200000000000)] +fn export_tree_ensemble_set_membership() { + let mut shape = ArrayTrait::::new(); + shape.append(6); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 78643, sign: false }); + data.append(FP16x16 { mag: 222822, sign: false }); + data.append(FP16x16 { mag: 7864, sign: true }); + data.append(NumberTrait::::NaN()); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 655360, sign: false }); + data.append(FP16x16 { mag: 65536000, sign: false }); + data.append(FP16x16 { mag: 6553600, sign: false }); + let leaf_weights = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 720896, sign: false 
}); + data.append(FP16x16 { mag: 1522663424, sign: false }); + data.append(NumberTrait::::NaN()); + let nodes_splits = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(8); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 78643, sign: false }); + data.append(FP16x16 { mag: 242483, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(NumberTrait::::NaN()); + data.append(FP16x16 { mag: 786432, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(NumberTrait::::NaN()); + let membership_values = Option::Some(TensorTrait::new(shape.span(), data.span())); + + let n_targets = 4; + let aggregate_function = AGGREGATE_FUNCTION::SUM; + let nodes_missing_value_tracks_true = Option::None; + let nodes_hitrates = Option::None; + let post_transform = POST_TRANSFORM::NONE; + + let tree_roots: Span = array![0].span(); + let nodes_modes: Span = array![NODE_MODE::LEQ, NODE_MODE::MEMBER, NODE_MODE::MEMBER] + .span(); + + let nodes_featureids: Span = array![0, 0, 0].span(); + let nodes_truenodeids: Span = array![1, 0, 1].span(); + let nodes_trueleafs: Span = array![0, 1, 1].span(); + let nodes_falsenodeids: Span = array![2, 2, 3].span(); + let nodes_falseleafs: Span = array![1, 0, 1].span(); + let leaf_targetids: Span = array![0, 1, 2, 3].span(); + + let mut scores = TreeEnsembleTrait::predict( + @X, + nodes_splits, + nodes_featureids, + nodes_modes, + nodes_truenodeids, + nodes_falsenodeids, + nodes_trueleafs, + nodes_falseleafs, + leaf_targetids, + leaf_weights, + tree_roots, + post_transform, + aggregate_function, + nodes_hitrates, + nodes_missing_value_tracks_true, + membership_values, + n_targets + ); + + // ASSERT SCOREs + assert(scores.at(0, 0) == FP16x16 { mag: 65536, sign: false }, 'scores.at(0, 0)'); + assert(scores.at(0, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(0, 1)'); + 
assert(scores.at(0, 2) == FP16x16 { mag: 0, sign: false }, 'scores.at(0, 2)'); + assert(scores.at(0, 3) == FP16x16 { mag: 0, sign: false }, 'scores.at(0, 3)'); + + assert(scores.at(1, 0) == FP16x16 { mag: 0, sign: false }, 'scores.at(1, 0)'); + assert(scores.at(1, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(1, 1)'); + assert(scores.at(1, 2) == FP16x16 { mag: 0, sign: false }, 'scores.at(1, 2)'); + assert(scores.at(1, 3) == FP16x16 { mag: 6553600, sign: false }, 'scores.at(1, 3)'); + + assert(scores.at(2, 0) == FP16x16 { mag: 0, sign: false }, 'scores.at(2, 0)'); + assert(scores.at(2, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(2, 1)'); + assert(scores.at(2, 2) == FP16x16 { mag: 0, sign: false }, 'scores.at(2, 2)'); + assert(scores.at(2, 3) == FP16x16 { mag: 6553600, sign: false }, 'scores.at(2, 3)'); + + assert(scores.at(3, 0) == FP16x16 { mag: 0, sign: false }, 'scores.at(3, 0)'); + assert(scores.at(3, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(3, 1)'); + assert(scores.at(3, 2) == FP16x16 { mag: 65536000, sign: false }, 'scores.at(3, 2)'); + assert(scores.at(3, 3) == FP16x16 { mag: 0, sign: false }, 'scores.at(3, 3)'); + + assert(scores.at(4, 0) == FP16x16 { mag: 0, sign: false }, 'scores.at(4, 0)'); + assert(scores.at(4, 1) == FP16x16 { mag: 0, sign: false }, 'scores.at(4, 1)'); + assert(scores.at(4, 2) == FP16x16 { mag: 65536000, sign: false }, 'scores.at(4, 2)'); + assert(scores.at(4, 3) == FP16x16 { mag: 0, sign: false }, 'scores.at(4, 3)'); + + assert(scores.at(5, 0) == FP16x16 { mag: 0, sign: false }, 'scores.at(5, 0)'); + assert(scores.at(5, 1) == FP16x16 { mag: 655360, sign: false }, 'scores.at(5, 1)'); + assert(scores.at(5, 2) == FP16x16 { mag: 0, sign: false }, 'scores.at(5, 2)'); + assert(scores.at(5, 3) == FP16x16 { mag: 0, sign: false }, 'scores.at(5, 3)'); +} + From 6494920191d5703e39cd443bcbd356be8cd82b97 Mon Sep 17 00:00:00 2001 From: chachaleo Date: Tue, 27 Feb 2024 13:35:14 +0100 Subject: [PATCH 65/68] feat: deform conv 2D --- 
docs/SUMMARY.md | 3 +- docs/framework/compatibility.md | 1 + .../neural-network/nn.deform_conv.md | 152 +++++ nodegen/node/deform_conv.py | 463 ++++++++++++++ src/operators/nn/core.cairo | 165 +++++ src/operators/nn/functional.cairo | 1 + src/operators/nn/functional/conv.cairo | 1 + src/operators/nn/functional/deform_conv.cairo | 598 ++++++++++++++++++ .../nn/implementations/nn_fp16x16.cairo | 19 +- .../nn/implementations/nn_fp32x32.cairo | 17 +- .../nn/implementations/nn_fp64x64.cairo | 17 +- .../nn/implementations/nn_fp8x23.cairo | 17 +- src/operators/nn/implementations/nn_i32.cairo | 15 - src/operators/nn/implementations/nn_i8.cairo | 15 - src/operators/nn/implementations/nn_u32.cairo | 15 - tests/nodes.cairo | 4 + tests/nodes/deform_conv.cairo | 36 ++ tests/nodes/deform_conv/input_0.cairo | 24 + tests/nodes/deform_conv/input_1.cairo | 19 + tests/nodes/deform_conv/input_2.cairo | 47 ++ tests/nodes/deform_conv/output_0.cairo | 19 + tests/nodes/deform_conv_with_mask_bias.cairo | 40 ++ .../deform_conv_with_mask_bias/input_0.cairo | 24 + .../deform_conv_with_mask_bias/input_1.cairo | 19 + .../deform_conv_with_mask_bias/input_2.cairo | 47 ++ .../deform_conv_with_mask_bias/input_3.cairo | 13 + .../deform_conv_with_mask_bias/input_4.cairo | 31 + .../deform_conv_with_mask_bias/output_0.cairo | 19 + ...orm_conv_with_multiple_offset_groups.cairo | 36 ++ .../input_0.cairo | 33 + .../input_1.cairo | 23 + .../input_2.cairo | 79 +++ .../output_0.cairo | 19 + tests/nodes/deform_conv_with_padding.cairo | 36 ++ .../deform_conv_with_padding/input_0.cairo | 24 + .../deform_conv_with_padding/input_1.cairo | 19 + .../deform_conv_with_padding/input_2.cairo | 143 +++++ .../deform_conv_with_padding/output_0.cairo | 31 + 38 files changed, 2233 insertions(+), 51 deletions(-) create mode 100644 docs/framework/operators/neural-network/nn.deform_conv.md create mode 100644 nodegen/node/deform_conv.py create mode 100644 src/operators/nn/functional/deform_conv.cairo create mode 100644 
tests/nodes/deform_conv.cairo create mode 100644 tests/nodes/deform_conv/input_0.cairo create mode 100644 tests/nodes/deform_conv/input_1.cairo create mode 100644 tests/nodes/deform_conv/input_2.cairo create mode 100644 tests/nodes/deform_conv/output_0.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias/input_0.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias/input_1.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias/input_2.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias/input_3.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias/input_4.cairo create mode 100644 tests/nodes/deform_conv_with_mask_bias/output_0.cairo create mode 100644 tests/nodes/deform_conv_with_multiple_offset_groups.cairo create mode 100644 tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo create mode 100644 tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo create mode 100644 tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo create mode 100644 tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo create mode 100644 tests/nodes/deform_conv_with_padding.cairo create mode 100644 tests/nodes/deform_conv_with_padding/input_0.cairo create mode 100644 tests/nodes/deform_conv_with_padding/input_1.cairo create mode 100644 tests/nodes/deform_conv_with_padding/input_2.cairo create mode 100644 tests/nodes/deform_conv_with_padding/output_0.cairo diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 6ef033dec..4006ff745 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -175,11 +175,12 @@ * [nn.gemm](framework/operators/neural-network/nn.gemm.md) * [nn.grid\_sample](framework/operators/neural-network/nn.grid\_sample.md) * [nn.col2im](framework/operators/neural-network/nn.col2im.md) - * [nn.conv_transpose](framework/operators/neural-network/nn.conv\_transpose.md) + * 
[nn.conv\_transpose](framework/operators/neural-network/nn.conv\_transpose.md) * [nn.conv](framework/operators/neural-network/nn.conv.md) * [nn.depth_to_space](framework/operators/neural-network/nn.depth_to_space.md) * [nn.space_to_depth](framework/operators/neural-network/nn.space_to_depth.md) * [nn.max\_pool](framework/operators/neural-network/nn.max\_pool.md) + * [nn.deform\_conv](framework/operators/neural-network/nn.deform\_conv.md) * [Machine Learning](framework/operators/machine-learning/README.md) * [Tree Ensemble Classifier](framework/operators/machine-learning/tree-ensemble-classifier/README.md) * [tree\_ensemble\_classifier.predict](framework/operators/machine-learning/tree-ensemble-classifier/tree\_ensemble\_classifier.predict.md) diff --git a/docs/framework/compatibility.md b/docs/framework/compatibility.md index 2a6bc7ea2..fb2da75a6 100644 --- a/docs/framework/compatibility.md +++ b/docs/framework/compatibility.md @@ -48,6 +48,7 @@ You can see below the list of current supported ONNX Operators: | [ConvTranspose](operators/neural-network/nn.conv\_transpose_.md) | :white\_check\_mark: | | [Conv](operators/neural-network/nn.conv.md) | :white\_check\_mark: | | [MaxPool](operators/neural-network/nn.max\_pool.md) | :white\_check\_mark: | +| [DeformConv](operators/neural-network/nn.deform\_conv.md) | :white\_check\_mark: | | [Sinh](operators/tensor/tensor.sinh.md) | :white\_check\_mark: | | [Asinh](operators/tensor/tensor.asinh.md) | :white\_check\_mark: | | [Atanh](operators/tensor/tensor.atanh.md) | :white\_check\_mark: | diff --git a/docs/framework/operators/neural-network/nn.deform_conv.md b/docs/framework/operators/neural-network/nn.deform_conv.md new file mode 100644 index 000000000..6a6718645 --- /dev/null +++ b/docs/framework/operators/neural-network/nn.deform_conv.md @@ -0,0 +1,152 @@ +# NNTrait::deform_conv + +```rust + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: 
Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor +``` + +Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. This operator specification supports the 2-D case. + +## Args + + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + +* `X`(`@Tensor`) - Input data tensor. For 2D image data, it has shape (N, C, H, W) where N is the batch size, C is the number of input channels, and H and W are the height and width. +* `W`(`@Tensor`) - Weight tensor that will be used in the convolutions. It has shape (oC, C/group, kH, kW), where oC is the number of output channels and kH and kW are the kernel height and width. +* `offset`(`@Tensor`) - Offset tensor denoting the offset for the sampling locations in the convolution kernel. It has shape (N, offset_group * kH * kW * 2, oH, oW) for 2D data +* `B`(`Option>`) - Default is a tensor of zeros, optional 1D bias of length oC to be added to the convolution. +* `mask`(`Option>`) - Default is a tensor of ones, the mask tensor to be applied to each position in the convolution kernel. It has shape (N, offset_group * kH * kW, oH, oW) for 2D data. +* `dilations`(`Option>`) - Default is 1 along each axis, dilation value along each spatial axis of the kernel. +* `group`(`usize`) - Default is 1, number of groups the input and output channels, C and oC, are divided into. +* `kernel_shape`(`Option>`) - Shape of the convolution kernel. If not present, it is inferred from the shape of input W. +* `offset_group`(`Option`) - Default is 1, number of groups of offset. C must be divisible by offset_group. +* `pads`(`Option>`) - Default is 0 along each axis, padding for the beginning and end along each spatial axis. 
The values represent the number of pixels added to the beginning and end of the corresponding axis and can take any nonnegative value. +* `strides`(`Option>`) - Default is 1 along each axis, stride along each spatial axis. + +## Returns + +A `Tensor` output tensor that contains the result of convolution. + +## Examples + +```rust +fn example_deform_conv() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + let mut X = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + let mut W = TensorTrait::new(shape.span(), data.span()); + + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 
0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + let mut offset = TensorTrait::new(shape.span(), data.span()); + + + return NNTrait::deform_conv( + @X, + @W, + @offset, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::Some(array![0, 0, 0, 0].span()), + Option::None, + ); +} + +>>> [ + [ + [ + [9.5, 11.9], + [20.0, 24.0], + ] + ] + ] + +```` \ No newline at end of file diff --git a/nodegen/node/deform_conv.py b/nodegen/node/deform_conv.py new file mode 100644 index 000000000..abb101dc7 --- /dev/null +++ b/nodegen/node/deform_conv.py @@ -0,0 +1,463 @@ +import numpy as np +from nodegen.node import RunAll +from ..helpers import make_test, to_fp, Tensor, Dtype, FixedImpl, Trait + +import numpy as np + +def deform_conv_implementation( # type: ignore + X, + W, + offset, + B=None, + 
mask=None, + dilations=None, + group=None, + kernel_shape=None, + offset_group=None, + pads=None, + strides=None, +): + if dilations is None: + dilations = [1 for s in X.shape[2:]] + if kernel_shape is None: + kernel_shape = W.shape[2:] + if pads is None: + pads = [0 for s in X.shape[2:]] * 2 + if strides is None: + strides = [1 for s in X.shape[2:]] + if group is None: + group = 1 + if offset_group is None: + offset_group = 1 + + n, ic = X.shape[:2] + oc = W.shape[0] + output_shape = offset.shape[2:] + + if ic != W.shape[1] * group or oc % group != 0: + raise ValueError( + f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}." + ) + ics_per_group, ocs_per_group = W.shape[1], oc // group + + if ic % offset_group != 0: + raise ValueError("Number of input channels must be divisible by offset_group.") + ics_per_offset_group = ic // offset_group + + if offset_group * np.prod(kernel_shape) * len(kernel_shape) != offset.shape[1]: + raise ValueError( + f"Offset shape {offset.shape} is inconsistent with offset_group {offset_group} " + f"and kernel shape {kernel_shape}." + ) + offset = offset.reshape( + (n, offset_group, *kernel_shape, len(kernel_shape), *output_shape) + ) + + if mask is None: + mask = np.ones((n, offset_group * np.prod(kernel_shape), *output_shape)) + mask = mask.reshape((n, offset_group, *kernel_shape, *output_shape)) + + from onnx.reference.ops._op_list import GridSample + + if len(X.shape) == 4: + ih, iw = X.shape[2:] + oh, ow = offset.shape[-2:] + kh, kw = kernel_shape + sth, stw = strides + dh, dw = dilations + kh_new, kw_new = (kh - 1) * dh + 1, (kw - 1) * dw + 1 + + if oh != int(((ih - kh_new + pads[0] + pads[2]) / sth) + 1) or ow != int( + ((iw - kw_new + pads[1] + pads[3]) / stw) + 1 + ): + raise RuntimeError( + "Padding, dilation, stride, and kernel shape incompatible with output shape." 
+ ) + + bh, bw = -pads[0], -pads[1] + + res = np.zeros((n, oc, oh, ow), dtype=X.dtype) + if B is not None: + res[:, :, :, :] = B.reshape((1, -1, 1, 1)) + + kernel_pos_w, kernel_pos_h = np.meshgrid( + np.arange(0, kw_new, dw), np.arange(0, kh_new, dh) + ) + + kernel_pos_wrt_first_elem = np.stack( + (kernel_pos_h, kernel_pos_w), axis=2 + ) + + for batch_idx in range(n): + for oc_idx in range(oc): + for ic_idx in range(ic): + # Group convolution logic + if ic_idx // ics_per_group != oc_idx // ocs_per_group: + # Input channel and output channel don't belong to same group + continue + + # Offset group logic + offset_group_idx = ic_idx // ics_per_offset_group + + for i in range(oh): + h_coord = bh + sth * i + for j in range(ow): + w_coord = bw + stw * j + + kernel = np.copy(kernel_pos_wrt_first_elem).astype(float) + kernel[:, :, 0] += ( + h_coord + + offset[batch_idx, offset_group_idx, :, :, 0, i, j] + ) + kernel[:, :, 1] += ( + w_coord + + offset[batch_idx, offset_group_idx, :, :, 1, i, j] + ) + + kernel[:, :, 0] = kernel[:, :, 0] / (ih - 1) * 2 - 1 + kernel[:, :, 1] = kernel[:, :, 1] / (iw - 1) * 2 - 1 + + kernel = np.expand_dims(kernel, 0) + + kernel = np.flip( + kernel, 3 + ) + + grid_sample_output = GridSample.eval( + X[batch_idx : batch_idx + 1, ic_idx : ic_idx + 1], + kernel, + align_corners=1, + ) + + conv_value = np.multiply( + grid_sample_output, + W[oc_idx, ic_idx % ics_per_group, :, :], + ) + conv_value = np.multiply( + conv_value, + mask[batch_idx, offset_group_idx, :, :, i, j], + ) + res[batch_idx, oc_idx, i, j] += np.sum(conv_value) + + return res + raise RuntimeError( + f"The convolution for X.shape={X.shape}, W.shape={W.shape}, " + f"kernel_shape={kernel_shape} is not implemented yet." 
+ ) + + + +def deform_conv_implementation( # type: ignore + X, + W, + offset, + B=None, + mask=None, + dilations=None, + group=None, + kernel_shape=None, + offset_group=None, + pads=None, + strides=None, +): + if dilations is None: + dilations = [1 for s in X.shape[2:]] + if kernel_shape is None: + kernel_shape = W.shape[2:] + if pads is None: + pads = [0 for s in X.shape[2:]] * 2 + if strides is None: + strides = [1 for s in X.shape[2:]] + if group is None: + group = 1 + if offset_group is None: + offset_group = 1 + + n, ic = X.shape[:2] + oc = W.shape[0] + output_shape = offset.shape[2:] + + if ic != W.shape[1] * group or oc % group != 0: + raise ValueError( + f"Shape inconsistencies, X.shape={X.shape}, W.shape={W.shape}, group={group}." + ) + ics_per_group, ocs_per_group = W.shape[1], oc // group + + if ic % offset_group != 0: + raise ValueError("Number of input channels must be divisible by offset_group.") + ics_per_offset_group = ic // offset_group + + if offset_group * np.prod(kernel_shape) * len(kernel_shape) != offset.shape[1]: + raise ValueError( + f"Offset shape {offset.shape} is inconsistent with offset_group {offset_group} " + f"and kernel shape {kernel_shape}." + ) + offset = offset.reshape( + (n, offset_group, *kernel_shape, len(kernel_shape), *output_shape) + ) + + if mask is None: + mask = np.ones((n, offset_group * np.prod(kernel_shape), *output_shape)) + mask = mask.reshape((n, offset_group, *kernel_shape, *output_shape)) + + from onnx.reference.ops._op_list import GridSample + + if len(X.shape) == 4: + ih, iw = X.shape[2:] + oh, ow = offset.shape[-2:] + kh, kw = kernel_shape + sth, stw = strides + dh, dw = dilations + kh_new, kw_new = (kh - 1) * dh + 1, (kw - 1) * dw + 1 + + if oh != int(((ih - kh_new + pads[0] + pads[2]) / sth) + 1) or ow != int( + ((iw - kw_new + pads[1] + pads[3]) / stw) + 1 + ): + raise RuntimeError( + "Padding, dilation, stride, and kernel shape incompatible with output shape." 
+ ) + + bh, bw = -pads[0], -pads[1] + + res = np.zeros((n, oc, oh, ow), dtype=X.dtype) + if B is not None: + res[:, :, :, :] = B.reshape((1, -1, 1, 1)) + + kernel_pos_w, kernel_pos_h = np.meshgrid( + np.arange(0, kw_new, dw), np.arange(0, kh_new, dh) + ) + + kernel_pos_wrt_first_elem = np.stack( + (kernel_pos_h, kernel_pos_w), axis=2 + ) + + for batch_idx in range(n): + for oc_idx in range(oc): + for ic_idx in range(ic): + # Group convolution logic + if ic_idx // ics_per_group != oc_idx // ocs_per_group: + # Input channel and output channel don't belong to same group + continue + + # Offset group logic + offset_group_idx = ic_idx // ics_per_offset_group + + for i in range(oh): + h_coord = bh + sth * i + for j in range(ow): + w_coord = bw + stw * j + + kernel = np.copy(kernel_pos_wrt_first_elem).astype(float) + kernel[:, :, 0] += ( + h_coord + + offset[batch_idx, offset_group_idx, :, :, 0, i, j] + ) + kernel[:, :, 1] += ( + w_coord + + offset[batch_idx, offset_group_idx, :, :, 1, i, j] + ) + + kernel[:, :, 0] = kernel[:, :, 0] / (ih - 1) * 2 - 1 + kernel[:, :, 1] = kernel[:, :, 1] / (iw - 1) * 2 - 1 + + kernel = np.expand_dims(kernel, 0) + + kernel = np.flip( + kernel, 3 + ) + + grid_sample_output = GridSample.eval( + X[batch_idx : batch_idx + 1, ic_idx : ic_idx + 1], + kernel, + align_corners=1, + ) + + conv_value = np.multiply( + grid_sample_output, + W[oc_idx, ic_idx % ics_per_group, :, :], + ) + conv_value = np.multiply( + conv_value, + mask[batch_idx, offset_group_idx, :, :, i, j], + ) + res[batch_idx, oc_idx, i, j] += np.sum(conv_value) + + return res + raise RuntimeError( + f"The convolution for X.shape={X.shape}, W.shape={W.shape}, " + f"kernel_shape={kernel_shape} is not implemented yet." 
+ ) + + +class Deform_conv(RunAll): + + @staticmethod + def export_deform_conv_without_padding() -> None: + x = np.arange(9).astype(np.float32) + x.shape = (1, 1, 3, 3) + w = np.ones((1, 1, 2, 2), dtype=np.float32) + + # Convolution without padding + offset = np.zeros((1, 8, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 5, 0, 1 + ] = -0.1 + + + + y = deform_conv_implementation(x, w, offset, kernel_shape=[2, 2]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w, offset], y, func_sig, name, Trait.NN) + + @staticmethod + def export_deform_conv_with_padding() -> None: + x = np.arange(9).astype(np.float32) + x.shape = (1, 1, 3, 3) + w = np.ones((1, 1, 2, 2), dtype=np.float32) + + # Convolution with padding + offset = np.zeros((1, 8, 4, 4), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 5, 1, 2 + ] = -0.1 + + + + y = deform_conv_implementation(x, w, offset, kernel_shape=[2, 2], pads=[1, 1, 1, 1]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = 
"deform_conv_with_padding" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::Some(array![1, 1, 1, 1].span())," + func_sig += "Option::None)" + make_test( + [x, w, offset], y, func_sig, name, Trait.NN) + + @staticmethod + def export_deform_conv_with_mask_bias() -> None: + x = np.arange(9).astype(np.float32) + x.shape = (1, 1, 3, 3) + w = np.ones((1, 1, 2, 2), dtype=np.float32) + + b = np.ones((1,), dtype=np.float32) + + offset = np.zeros((1, 8, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 5, 0, 1 + ] = -0.1 + + mask = np.ones((1, 4, 2, 2), dtype=np.float32) + mask[0, 2, 1, 1] = 0.2 + + y = deform_conv_implementation(x, w, offset, mask=mask, B=b, kernel_shape=[2, 2]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + b = Tensor(Dtype.FP16x16, b.shape, to_fp(b.flatten(), FixedImpl.FP16x16)) + mask = Tensor(Dtype.FP16x16, mask.shape, to_fp(mask.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv_with_mask_bias" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::Some(input_3.data)," + func_sig += "Option::Some(input_4)," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w, offset, b, mask], y, func_sig, name, Trait.NN) + + + 
@staticmethod + def export_deform_conv_with_multiple_offset_groups() -> None: + x = np.zeros((1, 2, 3, 3), dtype=np.float32) + x[0, 0] = np.reshape(np.arange(9).astype(np.float32), (3, 3)) + x[0, 1] = np.reshape(np.arange(8, -1, -1).astype(np.float32), (3, 3)) + x.shape = (1, 2, 3, 3) + w = np.ones((1, 2, 2, 2), dtype=np.float32) + + offset = np.zeros((1, 16, 2, 2), dtype=np.float32) + offset[ + 0, 0, 0, 0 + ] = 0.5 + offset[ + 0, 13, 0, 1 + ] = ( + -0.1 + ) + + + y = deform_conv_implementation(x, w, offset, offset_group=2, kernel_shape=[2, 2]) + + x = Tensor(Dtype.FP16x16, x.shape, to_fp(x.flatten(), FixedImpl.FP16x16)) + w = Tensor(Dtype.FP16x16, w.shape, to_fp(w.flatten(), FixedImpl.FP16x16)) + offset = Tensor(Dtype.FP16x16, offset.shape, to_fp(offset.flatten(), FixedImpl.FP16x16)) + y = Tensor(Dtype.FP16x16, y.shape, to_fp(y.flatten(), FixedImpl.FP16x16)) + + name = "deform_conv_with_multiple_offset_groups" + func_sig = "NNTrait::deform_conv(" + func_sig += "@input_0," + func_sig += "@input_1," + func_sig += "@input_2," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::None," + func_sig += "Option::Some(array![2, 2].span())," + func_sig += "Option::Some(2)," + func_sig += "Option::None," + func_sig += "Option::None)" + make_test( + [x, w, offset], y, func_sig, name, Trait.NN) + + + + \ No newline at end of file diff --git a/src/operators/nn/core.cairo b/src/operators/nn/core.cairo index e3b7b5a7b..7227870b8 100644 --- a/src/operators/nn/core.cairo +++ b/src/operators/nn/core.cairo @@ -1425,4 +1425,169 @@ trait NNTrait { strides: Option>, output_len: usize, ) -> (Tensor, Option>); + /// # NNTrait::deform_conv + /// + /// ```rust + /// fn deform_conv( + /// X: @Tensor, + /// W: @Tensor, + /// offset: @Tensor, + /// B: Option>, + /// mask: Option>, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// offset_group: Option, + /// pads: Option>, + /// strides: Option>, + /// ) 
-> Tensor + /// ``` + /// + /// Performs deformable convolution as described in https://arxiv.org/abs/1703.06211 and https://arxiv.org/abs/1811.11168. This operator specification supports the 2-D case. + /// + /// ## Args + /// + /// X: @Tensor, + /// W: @Tensor, + /// offset: @Tensor, + /// B: Option>, + /// mask: Option>, + /// dilations: Option>, + /// group: Option, + /// kernel_shape: Option>, + /// offset_group: Option, + /// pads: Option>, + /// strides: Option>, + /// + /// * `X`(`@Tensor`) - Input data tensor. For 2D image data, it has shape (N, C, H, W) where N is the batch size, C is the number of input channels, and H and W are the height and width. + /// * `W`(`@Tensor`) - Weight tensor that will be used in the convolutions. It has shape (oC, C/group, kH, kW), where oC is the number of output channels and kH and kW are the kernel height and width. + /// * `offset`(`@Tensor`) - Offset tensor denoting the offset for the sampling locations in the convolution kernel. It has shape (N, offset_group * kH * kW * 2, oH, oW) for 2D data + /// * `B`(`Option>`) - Default is a tensor of zeros, optional 1D bias of length oC to be added to the convolution. + /// * `mask`(`Option>`) - Default is a tensor of ones, the mask tensor to be applied to each position in the convolution kernel. It has shape (N, offset_group * kH * kW, oH, oW) for 2D data. + /// * `dilations`(`Option>`) - Default is 1 along each axis, dilation value along each spatial axis of the kernel. + /// * `group`(`usize`) - Default is 1, number of groups the input and output channels, C and oC, are divided into. + /// * `kernel_shape`(`Option>`) - Shape of the convolution kernel. If not present, it is inferred from the shape of input W. + /// * `offset_group`(`Option`) - Default is 1, number of groups of offset. C must be divisible by offset_group. + /// * `pads`(`Option>`) - Default is 0 along each axis, padding for the beginning and end along each spatial axis. 
The values represent the number of pixels added to the beginning and end of the corresponding axis and can take any nonnegative value. + /// * `strides`(`Option>`) - Default is 1 along each axis, stride along each spatial axis. + /// + /// ## Returns + /// + /// A `Tensor` output tensor that contains the result of convolution. + /// + /// ## Examples + /// + /// ```rust + /// fn example_deform_conv() -> Tensor { + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(3); + /// shape.append(3); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 131072, sign: false }); + /// data.append(FP16x16 { mag: 196608, sign: false }); + /// data.append(FP16x16 { mag: 262144, sign: false }); + /// data.append(FP16x16 { mag: 327680, sign: false }); + /// data.append(FP16x16 { mag: 393216, sign: false }); + /// data.append(FP16x16 { mag: 458752, sign: false }); + /// data.append(FP16x16 { mag: 524288, sign: false }); + /// let mut X = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(1); + /// shape.append(2); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// data.append(FP16x16 { mag: 65536, sign: false }); + /// let mut W = TensorTrait::new(shape.span(), data.span()); + /// + /// let mut shape = ArrayTrait::::new(); + /// shape.append(1); + /// shape.append(8); + /// shape.append(2); + /// shape.append(2); + /// + /// let mut data = ArrayTrait::new(); + /// data.append(FP16x16 { mag: 32768, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// 
data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 6553, sign: true }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// data.append(FP16x16 { mag: 0, sign: false }); + /// let mut offset = TensorTrait::new(shape.span(), data.span()); + /// + /// + /// return NNTrait::deform_conv( + /// @X, + /// @W, + /// @offset, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::None, + /// Option::Some(array![2, 2].span()), + /// Option::None, + /// Option::Some(array![0, 0, 0, 0].span()), + /// Option::None, + /// ); + /// } + /// + /// >>> [ + /// [ + /// [ + /// [9.5, 11.9], + /// [20.0, 24.0], + /// ] + /// ] + /// ] + /// + /// ```` + fn 
deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor; } diff --git a/src/operators/nn/functional.cairo b/src/operators/nn/functional.cairo index f02570148..50dbebb38 100644 --- a/src/operators/nn/functional.cairo +++ b/src/operators/nn/functional.cairo @@ -18,3 +18,4 @@ mod space_to_depth; mod conv; mod max_pool; mod common_pool; +mod deform_conv; diff --git a/src/operators/nn/functional/conv.cairo b/src/operators/nn/functional/conv.cairo index 0dcea51f2..851c15f88 100644 --- a/src/operators/nn/functional/conv.cairo +++ b/src/operators/nn/functional/conv.cairo @@ -7,6 +7,7 @@ use orion::operators::tensor::core::{stride}; use orion::operators::nn::AUTO_PAD; +/// Cf: NNTrait::conv docstring fn conv< T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, +Mul, +AddEq, >( diff --git a/src/operators/nn/functional/deform_conv.cairo b/src/operators/nn/functional/deform_conv.cairo new file mode 100644 index 000000000..1e43e36b7 --- /dev/null +++ b/src/operators/nn/functional/deform_conv.cairo @@ -0,0 +1,598 @@ +use core::array::ArrayTrait; +use orion::numbers::NumberTrait; +use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor,}; +use orion::operators::vec::{NullableVec, NullableVecImpl}; +use orion::operators::tensor::core::{stride}; +use core::debug::PrintTrait; +use core::traits::Into; +use orion::numbers::{U32IntoI32, I32IntoU32, I32Div, I32Number}; + + +use orion::operators::nn::functional::grid_sample::{grid_sample}; + + +/// Cf: NNTrait::deform_conv docstring +fn deform_conv< + T, + MAG, + +TensorTrait, + +NumberTrait, + +Copy, + +Drop, + +Add, + +Mul, + +Sub, + +Div, + +AddEq, + +PrintTrait, + +PartialOrd, + +PartialEq, + +TryInto, + +Into, + +Rem, + +Neg, + +SubEq, + +Mul>, +>( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + 
group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, +) -> Tensor { + assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + let dilations = match dilations { + Option::Some(dilations) => dilations, + Option::None => { + let mut dilations = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + dilations.append(1); + i += 1; + }; + dilations.span() + }, + }; + let kernel_shape = match kernel_shape { + Option::Some(kernel_shape) => kernel_shape, + Option::None => { + let mut kernel_shape = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*W).shape.len() { + break; + } + kernel_shape.append(*(*W).shape.at(i)); + i += 1; + }; + kernel_shape.span() + }, + }; + let pads = match pads { + Option::Some(pads) => pads, + Option::None => { + let mut pads = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + pads.append(0); + pads.append(0); + i += 1; + }; + pads.span() + }, + }; + let strides = match strides { + Option::Some(strides) => strides, + Option::None => { + let mut strides = ArrayTrait::new(); + let mut i = 2; + loop { + if i >= (*X).shape.len() { + break; + } + strides.append(1); + i += 1; + }; + strides.span() + }, + }; + let group = match group { + Option::Some(group) => group, + Option::None => { 1 }, + }; + + let offset_group = match offset_group { + Option::Some(offset_group) => offset_group, + Option::None => { 1 }, + }; + + let n = *(*X).shape.at(0); + let ic = *(*X).shape.at(1); + let oc = *(*W).shape.at(0); + let output_shape = SpanTrait::slice((*offset).shape, 2, (*offset).shape.len() - 2); + + assert(ic == *(*W).shape.at(1) * group, 'shape inconsistencies'); + assert(oc % group == 0, 'shape inconsistencies'); + + let ics_per_group = *(*W).shape.at(1); + let ocs_per_group = oc / group; + + assert(ic % offset_group == 0, 'offset_group inconsistencies'); + + let ics_per_offset_group = ic / offset_group; + + assert( + 
offset_group * prod(kernel_shape, 0) * kernel_shape.len() == *(*offset).shape.at(1), + 'offset_group inconsistencies' + ); + + let mut offset_shape = array![n, offset_group]; + offset_shape.append_span(kernel_shape); + offset_shape.append(kernel_shape.len()); + offset_shape.append_span(output_shape); + + let offset = offset.reshape(offset_shape.span()); + + let mask = match mask { + Option::Some(mask) => mask, + Option::None => { + let mut mask = ArrayTrait::::new(); + let mut i = 0; + loop { + if i == n * offset_group * prod(kernel_shape, 0) * prod(output_shape, 0) { + break; + } + mask.append(NumberTrait::::one()); + i += 1; + }; + let mut mask_shape = array![n, offset_group * prod(kernel_shape, 0)]; + mask_shape.append_span(output_shape); + TensorTrait::new(mask_shape.span(), mask.span()) + }, + }; + + let mut mask_shape = array![n, offset_group]; + mask_shape.append_span(kernel_shape); + mask_shape.append_span(output_shape); + let mask = mask.reshape(mask_shape.span()); + + if (*X).shape.len() == 4 { + let ih: T = NumberTrait::new_unscaled((*(*X).shape.at(2)).into(), false); + let iw: T = NumberTrait::new_unscaled((*(*X).shape.at(3)).into(), false); + + let x_stride = stride((*X).shape); + let w_stride = stride((*W).shape); + let offset_stride = stride(offset.shape); + let mask_stride = stride(mask.shape); + + let mut x_subset_shape = array![1, 1]; + x_subset_shape.append_span(SpanTrait::slice(*(X).shape, 2, (*(X).shape).len() - 2)); + let x_subset_shape = x_subset_shape.span(); + + let mut w_subset_shape = array![1, 1]; + w_subset_shape.append_span(SpanTrait::slice(*(W).shape, 2, (*(W).shape).len() - 2)); + let w_subset_shape = w_subset_shape.span(); + + let oh = *offset.shape.at(offset_shape.len() - 2); + let ow = *offset.shape.at(offset_shape.len() - 1); + + let kh = *kernel_shape.at(0); + let kw = *kernel_shape.at(1); + + let sth: T = NumberTrait::new_unscaled((*strides.at(0)).into(), false); + let stw: T = NumberTrait::new_unscaled((*strides.at(1)).into(), 
false); + + let dh = *dilations.at(0); + let dw = *dilations.at(1); + + let kh_new = (kh - 1) * dh + 1; + let kw_new = (kw - 1) * dw + 1; + + let bh: T = NumberTrait::new_unscaled((*pads.at(0)).into(), true); + let bw: T = NumberTrait::new_unscaled((*pads.at(1)).into(), true); + + assert( + oh == (((*(*X).shape.at(2) - kh_new + *pads.at(0) + *pads.at(2)) / *strides.at(0)) + 1), + 'incompatible shapes' + ); + assert( + ow == (((*(*X).shape.at(3) - kw_new + *pads.at(1) + *pads.at(3)) / *strides.at(1)) + 1), + 'incompatible shapes' + ); + + let mut res = NullableVecImpl::new(); + let res_shape = array![n, oc, oh, ow].span(); + let res_stride = stride(res_shape); + res.set(n * *res_stride.at(0) - 1, NumberTrait::zero()); + + match B { + Option::Some(B) => { + let mut i = 0; + loop { + if i == n { + break; + } + let mut j = 0; + loop { + if j == oc { + break; + } + let b_j = *B.at(j); + let mut k = 0; + loop { + if k == oh { + break; + } + let mut l = 0; + loop { + if l == ow { + break; + } + res + .set( + i * *res_stride.at(0) + + j * *res_stride.at(1) + + k * *res_stride.at(2) + + l, + b_j + ); + l += 1; + }; + k += 1; + }; + j += 1; + }; + i += 1; + }; + }, + Option::None => {}, + } + + let (kernel_pos_w, kernel_pos_h) = meshgrid(arange(0, kw_new, dw), arange(0, kh_new, dh)); + let kernel_pos_wrt_first_elem = stack(kernel_pos_h, kernel_pos_w); + + let dh: T = NumberTrait::new_unscaled(dh.into(), false); + let dw: T = NumberTrait::new_unscaled(dw.into(), false); + + let kh_new: T = NumberTrait::new_unscaled(kh_new.into(), false); + let kw_new: T = NumberTrait::new_unscaled(kw_new.into(), false); + + // dimension of kernel_pos_wrt_first_elem is ks0 x ks1 + let ks0 = NumberTrait::ceil(kh_new / dh).try_into().unwrap(); + let ks1 = NumberTrait::ceil(kw_new / dw).try_into().unwrap(); + + let one: T = NumberTrait::one(); + let two: T = NumberTrait::one() + NumberTrait::one(); + + let mut batch_idx = 0; + loop { + if batch_idx == n { + break; + } + let mut oc_idx = 0; + loop 
{ + if oc_idx == oc { + break; + } + let mut ic_idx = 0; + loop { + if ic_idx == ic { + break; + } + if (ic_idx / ics_per_group) == (oc_idx / ocs_per_group) { + let offset_group_idx = ic_idx / ics_per_offset_group; + + let mut i = 0; + loop { + if i == oh { + break; + } + let index = NumberTrait::new_unscaled(i.into(), false); + let h_coord = bh + sth * index; + let mut j = 0; + loop { + if j == ow { + break; + } + let jndex = NumberTrait::new_unscaled(j.into(), false); + let w_coord = bw + stw * jndex; + + let mut kernel = copy_to_vec(kernel_pos_wrt_first_elem); + let mut mask_subset = ArrayTrait::new(); + let mut kernel_test = ArrayTrait::new(); + let mut offset_TEST = ArrayTrait::new(); + + let mut hi = 0; + loop { + if hi == ks0 { + break; + } + let mut wi = 0; + loop { + if wi == ks1 { + break; + } + let elem1 = h_coord + + *offset + .data + .at( + batch_idx * *offset_stride.at(0) + + offset_group_idx * *offset_stride.at(1) + + hi * *offset_stride.at(2) + + wi * *offset_stride.at(3) + + 0 * *offset_stride.at(4) + + i * *offset_stride.at(5) + + j + ); + let elem2 = w_coord + + *offset + .data + .at( + batch_idx * *offset_stride.at(0) + + offset_group_idx * *offset_stride.at(1) + + hi * *offset_stride.at(2) + + wi * *offset_stride.at(3) + + 1 * *offset_stride.at(4) + + i * *offset_stride.at(5) + + j + ); + + mask_subset + .append( + *mask + .data + .at( + batch_idx * *mask_stride.at(0) + + offset_group_idx * *mask_stride.at(1) + + hi * *mask_stride.at(2) + + wi * *mask_stride.at(3) + + i * *mask_stride.at(4) + + j + ) + ); + kernel_test.append(kernel.at(hi * (ks1 * 2) + wi * 2)); + offset_TEST + .append( + *offset + .data + .at( + batch_idx * *offset_stride.at(0) + + offset_group_idx + * *offset_stride.at(1) + + hi * *offset_stride.at(2) + + wi * *offset_stride.at(3) + + 0 * *offset_stride.at(4) + + i * *offset_stride.at(5) + + j + ) + ); + kernel + .set( + hi * (ks1 * 2) + wi * 2, + (kernel.at(hi * (ks1 * 2) + wi * 2) + elem1) + / (ih - one) + * two + - one + 
); + kernel + .set( + hi * (ks1 * 2) + wi * 2 + 1, + (kernel.at(hi * (ks1 * 2) + wi * 2 + 1) + elem2) + / (iw - one) + * two + - one + ); + wi += 1; + }; + hi += 1; + }; + let kernel = flip_mod_2(ref kernel); + + let subset_x = TensorTrait::new( + x_subset_shape, + SpanTrait::slice( + (*X).data, + batch_idx * *x_stride.at(0) + ic_idx * *x_stride.at(1), + *x_stride.at(1) + ) + ); + let subset_w = TensorTrait::new( + w_subset_shape, + SpanTrait::slice( + (*W).data, + oc_idx * *w_stride.at(0) + + (ic_idx % ics_per_group) * *w_stride.at(1), + *w_stride.at(1) + ) + ); + let mask_subset = TensorTrait::new( + array![1, 1, ks0, ks1].span(), mask_subset.span() + ); + let kernel = TensorTrait::new( + array![1, ks0, ks1, 2].span(), kernel + ); + + let grid_sample_output = grid_sample( + @subset_x, @kernel, Option::Some(1), Option::None, Option::None + ); + + // broadcasted multiply + let conv_value = (grid_sample_output * subset_w); + let conv_value = (conv_value * mask_subset); + + res + .set( + batch_idx * *res_stride.at(0) + + oc_idx * *res_stride.at(1) + + i * *res_stride.at(2) + + j, + res + .at( + batch_idx * *res_stride.at(0) + + oc_idx * *res_stride.at(1) + + i * *res_stride.at(2) + + j + ) + + sum(conv_value.data, 0) + ); + j += 1; + }; + i += 1; + }; + } + ic_idx += 1; + }; + oc_idx += 1; + }; + batch_idx += 1; + }; + + let mut res_data = ArrayTrait::new(); + let mut i = 0; + loop { + if i == res.len() { + break; + } + res_data.append(res.at(i)); + i += 1; + }; + return TensorTrait::new(res_shape, res_data.span()); + } + + panic(array!['not supported yet!']) +} + + +fn meshgrid(x: Span, y: Span) -> (Span, Span) { + let mut xv = ArrayTrait::new(); + let mut yv = ArrayTrait::new(); + + let mut i = 0; + loop { + if i == y.len() { + break; + } + + xv.append_span(x); + let mut j = 0; + loop { + if j == x.len() { + break; + } + yv.append(*y.at(i)); + j += 1; + }; + i += 1; + }; + return (xv.span(), yv.span()); +} + +fn stack(x: Span, y: Span) -> Span { + let mut stack = 
ArrayTrait::new(); + + let mut i = 0; + loop { + if i == x.len() { + break; + } + stack.append(*x.at(i)); + stack.append(*y.at(i)); + i += 1; + }; + + return stack.span(); +} + + +fn flip_mod_2, impl TCopy: Copy, +NumberTrait>( + ref x: NullableVec +) -> Span { + let mut i = 0; + let mut res = ArrayTrait::new(); + loop { + if i == x.len / 2 { + break; + } + res.append(x.at(i * 2 + 1)); + res.append(x.at(i * 2)); + i += 1; + }; + + return res.span(); +} + +fn copy_to_vec< + T, MAG, +Drop, +Copy, +NumberTrait, +TryInto, +Into, +>( + x: Span +) -> NullableVec { + let mut res = NullableVecImpl::new(); + + let mut i = 0; + loop { + if i == x.len() { + break; + } + res.set(i, NumberTrait::new_unscaled((*x.at(i)).into(), false)); + i += 1; + }; + + return res; +} + +// return a span of len ceil((end - start) / step) +fn arange(start: usize, end: usize, step: usize) -> Span { + let mut arr = ArrayTrait::new(); + let mut i = start; + loop { + if i >= end { + break; + } + arr.append(i); + i += step; + }; + return arr.span(); +} + + +fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( + pA: Span, start: usize +) -> T { + let mut i = start; + let mut prod = NumberTrait::one(); + loop { + if i == pA.len() { + break; + } + prod = prod * (*pA.at(i)); + i += 1; + }; + return prod; +} + +fn sum, +Copy, +NumberTrait, +TensorTrait, +AddEq,>( + a: Span, start: usize +) -> T { + let mut i = start; + let mut sum = NumberTrait::zero(); + loop { + if i == a.len() { + break; + } + sum += (*a.at(i)); + i += 1; + }; + return sum; +} diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index 12a991b01..a882dedd7 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -3,13 +3,13 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp16x16::core::FP16x16; use 
orion::operators::tensor::implementations::tensor_fp16x16::{ - FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd + FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd, FP16x16TensorMul }; use orion::numbers::fixed_point::implementations::fp16x16wide::core::{ FP16x16WImpl, FP16x16WTryIntoFP16x16, FP16x16W, FP16x16IntoFP16x16W }; use orion::operators::tensor::implementations::tensor_fp16x16wide::{ - FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd + FP16x16WTensor, FP16x16WTensorDiv, FP16x16WTensorAdd, FP16x16WTensorMul }; use orion::operators::nn::AUTO_PAD; @@ -166,6 +166,21 @@ impl FP16x16NN of NNTrait { storage_order, strides, output_len + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index 0664981d4..f12276a0c 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -3,7 +3,7 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp32x32::core::{FP32x32, FP32x32Impl}; use orion::operators::tensor::implementations::tensor_fp32x32::{ - FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd + FP32x32Tensor, FP32x32TensorDiv, FP32x32TensorAdd, FP32x32TensorMul }; use orion::operators::nn::AUTO_PAD; @@ -160,6 +160,21 @@ impl FP32x32NN of NNTrait { storage_order, strides, output_len + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) 
-> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index e60148d30..84fb5d604 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -3,7 +3,7 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp64x64::core::{FP64x64, FP64x64Impl}; use orion::operators::tensor::implementations::tensor_fp64x64::{ - FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd + FP64x64Tensor, FP64x64TensorDiv, FP64x64TensorAdd, FP64x64TensorMul }; use orion::operators::nn::AUTO_PAD; @@ -160,6 +160,21 @@ impl FP64x64NN of NNTrait { storage_order, strides, output_len + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index 38d9bed74..f25bfa86e 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -3,7 +3,7 @@ use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::numbers::fixed_point::implementations::fp8x23::core::FP8x23; use orion::operators::tensor::implementations::tensor_fp8x23::{ - FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd + FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd, FP8x23TensorMul }; use orion::numbers::fixed_point::implementations::fp8x23wide::core::{ FP8x23WImpl, FP8x23WTryIntoFP8x23, FP8x23W, 
FP8x23IntoFP8x23W @@ -162,6 +162,21 @@ impl FP8x23NN of NNTrait { storage_order, strides, output_len + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + functional::deform_conv::deform_conv( + X, W, offset, B, mask, dilations, group, kernel_shape, offset_group, pads, strides, ) } } diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index b427eadb7..f09e34ef7 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -2,7 +2,6 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd}; -use orion::operators::nn::AUTO_PAD; impl I32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -131,18 +130,4 @@ impl I32NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } - - fn max_pool( - X: @Tensor, - auto_pad: Option, - ceil_mode: Option, - dilations: Option>, - kernel_shape: Span, - pads: Option>, - storage_order: Option, - strides: Option>, - output_len: usize, - ) -> (Tensor, Option>) { - panic(array!['not supported!']) - } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index 18398eca2..befc8b8b0 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ b/src/operators/nn/implementations/nn_i8.cairo @@ -2,7 +2,6 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; -use orion::operators::nn::AUTO_PAD; impl I8NN of NNTrait { fn relu(tensor: 
@Tensor) -> Tensor { @@ -131,18 +130,4 @@ impl I8NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } - - fn max_pool( - X: @Tensor, - auto_pad: Option, - ceil_mode: Option, - dilations: Option>, - kernel_shape: Span, - pads: Option>, - storage_order: Option, - strides: Option>, - output_len: usize, - ) -> (Tensor, Option>) { - panic(array!['not supported!']) - } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index a76f0528b..1cd4c926f 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -2,7 +2,6 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; -use orion::operators::nn::AUTO_PAD; impl U32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -131,18 +130,4 @@ impl U32NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } - - fn max_pool( - X: @Tensor, - auto_pad: Option, - ceil_mode: Option, - dilations: Option>, - kernel_shape: Span, - pads: Option>, - storage_order: Option, - strides: Option>, - output_len: usize, - ) -> (Tensor, Option>) { - panic(array!['not supported!']) - } } diff --git a/tests/nodes.cairo b/tests/nodes.cairo index 665c20106..95c1c87a3 100644 --- a/tests/nodes.cairo +++ b/tests/nodes.cairo @@ -1001,3 +1001,7 @@ mod argmax_negative_axis_keepdims; mod argmax_negative_axis_keepdims_select_last_index; mod argmax_no_keepdims; mod argmax_no_keepdims_select_last_index; +mod deform_conv_with_padding; +mod deform_conv_with_mask_bias; +mod deform_conv_with_multiple_offset_groups; +mod deform_conv; diff --git a/tests/nodes/deform_conv.cairo b/tests/nodes/deform_conv.cairo new file mode 100644 index 000000000..4568731a7 --- /dev/null +++ 
b/tests/nodes/deform_conv.cairo @@ -0,0 +1,36 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv/input_0.cairo b/tests/nodes/deform_conv/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/deform_conv/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv/input_1.cairo b/tests/nodes/deform_conv/input_1.cairo new 
file mode 100644 index 000000000..fd236fd56 --- /dev/null +++ b/tests/nodes/deform_conv/input_1.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv/input_2.cairo b/tests/nodes/deform_conv/input_2.cairo new file mode 100644 index 000000000..f292a30c7 --- /dev/null +++ b/tests/nodes/deform_conv/input_2.cairo @@ -0,0 +1,47 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv/output_0.cairo b/tests/nodes/deform_conv/output_0.cairo new file mode 100644 index 000000000..0405486ac --- /dev/null +++ b/tests/nodes/deform_conv/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 622592, sign: false }); + data.append(FP16x16 { mag: 779878, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias.cairo b/tests/nodes/deform_conv_with_mask_bias.cairo new file mode 100644 index 000000000..2627fa720 --- /dev/null 
+++ b/tests/nodes/deform_conv_with_mask_bias.cairo @@ -0,0 +1,40 @@ +mod input_0; +mod input_1; +mod input_2; +mod input_3; +mod input_4; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv_with_mask_bias() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let input_3 = input_3::input_3(); + let input_4 = input_4::input_4(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::Some(input_3.data), + Option::Some(input_4), + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::None, + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_0.cairo b/tests/nodes/deform_conv_with_mask_bias/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false 
}); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_1.cairo b/tests/nodes/deform_conv_with_mask_bias/input_1.cairo new file mode 100644 index 000000000..fd236fd56 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_1.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_2.cairo b/tests/nodes/deform_conv_with_mask_bias/input_2.cairo new file mode 100644 index 000000000..f292a30c7 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_2.cairo @@ -0,0 +1,47 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/input_3.cairo b/tests/nodes/deform_conv_with_mask_bias/input_3.cairo new file mode 100644 index 000000000..805491b57 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_3.cairo @@ -0,0 +1,13 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_3() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git 
a/tests/nodes/deform_conv_with_mask_bias/input_4.cairo b/tests/nodes/deform_conv_with_mask_bias/input_4.cairo new file mode 100644 index 000000000..13381e7f3 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/input_4.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_4() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(4); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 13107, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_mask_bias/output_0.cairo b/tests/nodes/deform_conv_with_mask_bias/output_0.cairo new file mode 100644 index 000000000..4e2ac6dc2 --- /dev/null +++ b/tests/nodes/deform_conv_with_mask_bias/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn 
output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 688128, sign: false }); + data.append(FP16x16 { mag: 845414, sign: false }); + data.append(FP16x16 { mag: 1376256, sign: false }); + data.append(FP16x16 { mag: 1271398, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups.cairo new file mode 100644 index 000000000..91840d1c4 --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups.cairo @@ -0,0 +1,36 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv_with_multiple_offset_groups() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + Option::Some(2), + Option::None, + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo new file mode 100644 index 000000000..e628eeb6d --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/input_0.cairo @@ -0,0 +1,33 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> 
Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo new file mode 100644 index 000000000..a6c0269ea --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/input_1.cairo @@ -0,0 +1,23 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(2); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + 
data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo new file mode 100644 index 000000000..1c59d0824 --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/input_2.cairo @@ -0,0 +1,79 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(16); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: 
false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo b/tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo new file mode 100644 index 000000000..9483db7b7 --- /dev/null +++ b/tests/nodes/deform_conv_with_multiple_offset_groups/output_0.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 2195456, sign: false }); + data.append(FP16x16 { mag: 2103705, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + data.append(FP16x16 { mag: 2097152, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_padding.cairo b/tests/nodes/deform_conv_with_padding.cairo new file mode 100644 index 000000000..553b33a28 --- /dev/null +++ b/tests/nodes/deform_conv_with_padding.cairo @@ -0,0 +1,36 @@ +mod input_0; +mod input_1; +mod input_2; +mod output_0; + + +use orion::numbers::FixedTrait; +use orion::operators::nn::NNTrait; +use orion::operators::nn::FP16x16NN; +use orion::utils::{assert_eq, assert_seq_eq}; +use orion::operators::tensor::FP16x16TensorPartialEq; + +#[test] +#[available_gas(2000000000)] +fn test_deform_conv_with_padding() { + let input_0 = input_0::input_0(); + let input_1 = input_1::input_1(); + let input_2 = input_2::input_2(); + let z_0 = output_0::output_0(); + + let y_0 = NNTrait::deform_conv( + @input_0, + @input_1, + @input_2, + Option::None, + Option::None, + Option::None, + Option::None, + Option::Some(array![2, 2].span()), + 
Option::None, + Option::Some(array![1, 1, 1, 1].span()), + Option::None + ); + + assert_eq(y_0, z_0); +} diff --git a/tests/nodes/deform_conv_with_padding/input_0.cairo b/tests/nodes/deform_conv_with_padding/input_0.cairo new file mode 100644 index 000000000..5208e1993 --- /dev/null +++ b/tests/nodes/deform_conv_with_padding/input_0.cairo @@ -0,0 +1,24 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(3); + shape.append(3); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 262144, sign: false }); + data.append(FP16x16 { mag: 327680, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_padding/input_1.cairo b/tests/nodes/deform_conv_with_padding/input_1.cairo new file mode 100644 index 000000000..fd236fd56 --- /dev/null +++ b/tests/nodes/deform_conv_with_padding/input_1.cairo @@ -0,0 +1,19 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_1() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(2); + shape.append(2); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, 
sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_padding/input_2.cairo b/tests/nodes/deform_conv_with_padding/input_2.cairo new file mode 100644 index 000000000..1c854d398 --- /dev/null +++ b/tests/nodes/deform_conv_with_padding/input_2.cairo @@ -0,0 +1,143 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn input_2() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(8); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 32768, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, 
sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + 
data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 6553, sign: true }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, 
sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 0, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} diff --git a/tests/nodes/deform_conv_with_padding/output_0.cairo b/tests/nodes/deform_conv_with_padding/output_0.cairo new file mode 100644 index 000000000..a5444d5fd --- /dev/null +++ b/tests/nodes/deform_conv_with_padding/output_0.cairo @@ -0,0 +1,31 @@ +use core::array::{ArrayTrait, SpanTrait}; +use orion::operators::tensor::{TensorTrait, Tensor}; +use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorAdd}; +use orion::numbers::{FixedTrait, FP16x16}; + +fn output_0() -> Tensor { + let mut shape = ArrayTrait::::new(); + shape.append(1); + shape.append(1); + shape.append(4); + shape.append(4); + + let mut data = ArrayTrait::new(); + data.append(FP16x16 { mag: 0, sign: false }); + data.append(FP16x16 { mag: 65536, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 131072, sign: false }); + data.append(FP16x16 { mag: 196608, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + 
data.append(FP16x16 { mag: 779878, sign: false }); + data.append(FP16x16 { mag: 458752, sign: false }); + data.append(FP16x16 { mag: 589824, sign: false }); + data.append(FP16x16 { mag: 1310720, sign: false }); + data.append(FP16x16 { mag: 1572864, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 393216, sign: false }); + data.append(FP16x16 { mag: 851968, sign: false }); + data.append(FP16x16 { mag: 983040, sign: false }); + data.append(FP16x16 { mag: 524288, sign: false }); + TensorTrait::new(shape.span(), data.span()) +} From 5d3802a56bd66b5836ddf2409a52715b9b8c604e Mon Sep 17 00:00:00 2001 From: chachaleo Date: Mon, 4 Mar 2024 12:50:58 +0100 Subject: [PATCH 66/68] fix: while loops --- src/operators/nn/functional/deform_conv.cairo | 140 +++++------------- 1 file changed, 36 insertions(+), 104 deletions(-) diff --git a/src/operators/nn/functional/deform_conv.cairo b/src/operators/nn/functional/deform_conv.cairo index 1e43e36b7..c8ffc7c3a 100644 --- a/src/operators/nn/functional/deform_conv.cairo +++ b/src/operators/nn/functional/deform_conv.cairo @@ -47,15 +47,14 @@ fn deform_conv< strides: Option>, ) -> Tensor { assert((*X).shape.len() >= 3, 'X must have at least 3 dim'); + assert((*W).shape.len() >= 3, 'X must have at least 3 dim'); + let dilations = match dilations { Option::Some(dilations) => dilations, Option::None => { let mut dilations = ArrayTrait::new(); let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { dilations.append(1); i += 1; }; @@ -67,10 +66,7 @@ fn deform_conv< Option::None => { let mut kernel_shape = ArrayTrait::new(); let mut i = 2; - loop { - if i >= (*W).shape.len() { - break; - } + while i != (*W).shape.len() { kernel_shape.append(*(*W).shape.at(i)); i += 1; }; @@ -82,10 +78,7 @@ fn deform_conv< Option::None => { let mut pads = ArrayTrait::new(); let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { 
pads.append(0); pads.append(0); i += 1; @@ -98,10 +91,7 @@ fn deform_conv< Option::None => { let mut strides = ArrayTrait::new(); let mut i = 2; - loop { - if i >= (*X).shape.len() { - break; - } + while i != (*X).shape.len() { strides.append(1); i += 1; }; @@ -149,11 +139,9 @@ fn deform_conv< Option::Some(mask) => mask, Option::None => { let mut mask = ArrayTrait::::new(); + let mask_end = n * offset_group * prod(kernel_shape, 0) * prod(output_shape, 0); let mut i = 0; - loop { - if i == n * offset_group * prod(kernel_shape, 0) * prod(output_shape, 0) { - break; - } + while i != mask_end { mask.append(NumberTrait::::one()); i += 1; }; @@ -220,26 +208,14 @@ fn deform_conv< match B { Option::Some(B) => { let mut i = 0; - loop { - if i == n { - break; - } + while i != n { let mut j = 0; - loop { - if j == oc { - break; - } + while j != oc { let b_j = *B.at(j); let mut k = 0; - loop { - if k == oh { - break; - } + while k != oh { let mut l = 0; - loop { - if l == ow { - break; - } + while l != ow { res .set( i * *res_stride.at(0) @@ -277,35 +253,20 @@ fn deform_conv< let two: T = NumberTrait::one() + NumberTrait::one(); let mut batch_idx = 0; - loop { - if batch_idx == n { - break; - } + while batch_idx != n { let mut oc_idx = 0; - loop { - if oc_idx == oc { - break; - } + while oc_idx != oc { let mut ic_idx = 0; - loop { - if ic_idx == ic { - break; - } + while ic_idx != ic { if (ic_idx / ics_per_group) == (oc_idx / ocs_per_group) { let offset_group_idx = ic_idx / ics_per_offset_group; let mut i = 0; - loop { - if i == oh { - break; - } + while i != oh { let index = NumberTrait::new_unscaled(i.into(), false); let h_coord = bh + sth * index; let mut j = 0; - loop { - if j == ow { - break; - } + while j != ow { let jndex = NumberTrait::new_unscaled(j.into(), false); let w_coord = bw + stw * jndex; @@ -315,15 +276,9 @@ fn deform_conv< let mut offset_TEST = ArrayTrait::new(); let mut hi = 0; - loop { - if hi == ks0 { - break; - } + while hi != ks0 { let mut wi = 0; - 
loop { - if wi == ks1 { - break; - } + while wi != ks1 { let elem1 = h_coord + *offset .data @@ -461,10 +416,7 @@ fn deform_conv< let mut res_data = ArrayTrait::new(); let mut i = 0; - loop { - if i == res.len() { - break; - } + while i != res.len() { res_data.append(res.at(i)); i += 1; }; @@ -480,17 +432,11 @@ fn meshgrid(x: Span, y: Span) -> (Span, Span) { let mut yv = ArrayTrait::new(); let mut i = 0; - loop { - if i == y.len() { - break; - } + while i != y.len() { xv.append_span(x); let mut j = 0; - loop { - if j == x.len() { - break; - } + while j != x.len() { yv.append(*y.at(i)); j += 1; }; @@ -503,10 +449,7 @@ fn stack(x: Span, y: Span) -> Span { let mut stack = ArrayTrait::new(); let mut i = 0; - loop { - if i == x.len() { - break; - } + while i != x.len() { stack.append(*x.at(i)); stack.append(*y.at(i)); i += 1; @@ -521,10 +464,7 @@ fn flip_mod_2, impl TCopy: Copy, +NumberTrait Span { let mut i = 0; let mut res = ArrayTrait::new(); - loop { - if i == x.len / 2 { - break; - } + while i != x.len / 2 { res.append(x.at(i * 2 + 1)); res.append(x.at(i * 2)); i += 1; @@ -541,10 +481,7 @@ fn copy_to_vec< let mut res = NullableVecImpl::new(); let mut i = 0; - loop { - if i == x.len() { - break; - } + while i != x.len() { res.set(i, NumberTrait::new_unscaled((*x.at(i)).into(), false)); i += 1; }; @@ -556,10 +493,7 @@ fn copy_to_vec< fn arange(start: usize, end: usize, step: usize) -> Span { let mut arr = ArrayTrait::new(); let mut i = start; - loop { - if i >= end { - break; - } + while i != end { arr.append(i); i += step; }; @@ -568,29 +502,27 @@ fn arange(start: usize, end: usize, step: usize) -> Span { fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize + a: Span, start: usize ) -> T { - let mut i = start; + assert(a.len() > start, 'wrong input dim'); let mut prod = NumberTrait::one(); - loop { - if i == pA.len() { - break; - } - prod = prod * (*pA.at(i)); + let mut i = start; + while i != a.len() { + prod = prod * (*a.at(i)); i += 1; }; 
return prod; } + + fn sum, +Copy, +NumberTrait, +TensorTrait, +AddEq,>( a: Span, start: usize ) -> T { - let mut i = start; + assert(a.len() > start, 'wrong input dim'); let mut sum = NumberTrait::zero(); - loop { - if i == a.len() { - break; - } + let mut i = start; + while i != a.len() { sum += (*a.at(i)); i += 1; }; From a0ce46eb9b9ceca471897c72ab0c07255feb673a Mon Sep 17 00:00:00 2001 From: chachaleo Date: Mon, 22 Apr 2024 03:26:07 +0200 Subject: [PATCH 67/68] fix: to match reshape new signature --- src/operators/nn/functional/deform_conv.cairo | 36 ++++++++++++++----- .../nn/implementations/nn_fp16x16.cairo | 3 +- .../nn/implementations/nn_fp32x32.cairo | 3 +- .../nn/implementations/nn_fp64x64.cairo | 3 +- .../nn/implementations/nn_fp8x23.cairo | 3 +- src/operators/nn/implementations/nn_i32.cairo | 31 ++++++++++++++++ src/operators/nn/implementations/nn_i8.cairo | 31 ++++++++++++++++ src/operators/nn/implementations/nn_u32.cairo | 31 ++++++++++++++++ 8 files changed, 128 insertions(+), 13 deletions(-) diff --git a/src/operators/nn/functional/deform_conv.cairo b/src/operators/nn/functional/deform_conv.cairo index c8ffc7c3a..bb04b11c8 100644 --- a/src/operators/nn/functional/deform_conv.cairo +++ b/src/operators/nn/functional/deform_conv.cairo @@ -128,12 +128,12 @@ fn deform_conv< 'offset_group inconsistencies' ); - let mut offset_shape = array![n, offset_group]; - offset_shape.append_span(kernel_shape); - offset_shape.append(kernel_shape.len()); - offset_shape.append_span(output_shape); + let mut offset_shape = array![n.into(), offset_group.into()]; + offset_shape.append_span(span_U32_to_span_I32(kernel_shape.clone())); + offset_shape.append(kernel_shape.len().into()); + offset_shape.append_span(span_U32_to_span_I32(output_shape.clone())); - let offset = offset.reshape(offset_shape.span()); + let offset = offset.reshape(offset_shape.span(), false); let mask = match mask { Option::Some(mask) => mask, @@ -151,10 +151,10 @@ fn deform_conv< }, }; - let mut 
mask_shape = array![n, offset_group]; - mask_shape.append_span(kernel_shape); - mask_shape.append_span(output_shape); - let mask = mask.reshape(mask_shape.span()); + let mut mask_shape = array![n.into(), offset_group.into()]; + mask_shape.append_span(span_U32_to_span_I32(kernel_shape.clone())); + mask_shape.append_span(span_U32_to_span_I32(output_shape.clone())); + let mask = mask.reshape(mask_shape.span(), false); if (*X).shape.len() == 4 { let ih: T = NumberTrait::new_unscaled((*(*X).shape.at(2)).into(), false); @@ -528,3 +528,21 @@ fn sum, +Copy, +NumberTrait, +TensorTrait, +AddEq }; return sum; } + + +fn span_U32_to_span_I32( + mut x: Span +) -> Span { + let mut res = ArrayTrait::new(); + + loop { + match x.pop_front() { + Option::Some(v) => { + res.append((*v).into()); + }, + Option::None => { break; } + }; + }; + + return res.span(); +} \ No newline at end of file diff --git a/src/operators/nn/implementations/nn_fp16x16.cairo b/src/operators/nn/implementations/nn_fp16x16.cairo index a882dedd7..05faad2ba 100644 --- a/src/operators/nn/implementations/nn_fp16x16.cairo +++ b/src/operators/nn/implementations/nn_fp16x16.cairo @@ -165,7 +165,8 @@ impl FP16x16NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } fn deform_conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp32x32.cairo b/src/operators/nn/implementations/nn_fp32x32.cairo index f12276a0c..a1ca177dd 100644 --- a/src/operators/nn/implementations/nn_fp32x32.cairo +++ b/src/operators/nn/implementations/nn_fp32x32.cairo @@ -159,7 +159,8 @@ impl FP32x32NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } fn deform_conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp64x64.cairo b/src/operators/nn/implementations/nn_fp64x64.cairo index 84fb5d604..6d6770551 100644 --- a/src/operators/nn/implementations/nn_fp64x64.cairo +++ b/src/operators/nn/implementations/nn_fp64x64.cairo @@ -159,7 +159,8 @@ impl FP64x64NN 
of NNTrait { pads, storage_order, strides, - output_len + output_len) + } fn deform_conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_fp8x23.cairo b/src/operators/nn/implementations/nn_fp8x23.cairo index f25bfa86e..924b16d34 100644 --- a/src/operators/nn/implementations/nn_fp8x23.cairo +++ b/src/operators/nn/implementations/nn_fp8x23.cairo @@ -161,7 +161,8 @@ impl FP8x23NN of NNTrait { pads, storage_order, strides, - output_len + output_len) + } fn deform_conv( X: @Tensor, W: @Tensor, diff --git a/src/operators/nn/implementations/nn_i32.cairo b/src/operators/nn/implementations/nn_i32.cairo index f09e34ef7..973dfb552 100644 --- a/src/operators/nn/implementations/nn_i32.cairo +++ b/src/operators/nn/implementations/nn_i32.cairo @@ -2,6 +2,8 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i32::{I32Tensor, I32TensorAdd}; +use orion::operators::nn::AUTO_PAD; + impl I32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -130,4 +132,33 @@ impl I32NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + panic(array!['not supported!']) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_i8.cairo b/src/operators/nn/implementations/nn_i8.cairo index befc8b8b0..d48e398df 100644 --- a/src/operators/nn/implementations/nn_i8.cairo +++ 
b/src/operators/nn/implementations/nn_i8.cairo @@ -2,6 +2,8 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_i8::{I8Tensor, I8TensorAdd}; +use orion::operators::nn::AUTO_PAD; + impl I8NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -130,4 +132,33 @@ impl I8NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, Option>) { + panic(array!['not supported!']) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + panic(array!['not supported!']) + } } diff --git a/src/operators/nn/implementations/nn_u32.cairo b/src/operators/nn/implementations/nn_u32.cairo index 1cd4c926f..504a8199b 100644 --- a/src/operators/nn/implementations/nn_u32.cairo +++ b/src/operators/nn/implementations/nn_u32.cairo @@ -2,6 +2,8 @@ use orion::operators::tensor::core::Tensor; use orion::operators::nn::core::NNTrait; use orion::operators::nn::functional; use orion::operators::tensor::implementations::tensor_u32::{U32Tensor, U32TensorAdd}; +use orion::operators::nn::AUTO_PAD; + impl U32NN of NNTrait { fn relu(tensor: @Tensor) -> Tensor { @@ -130,4 +132,33 @@ impl U32NN of NNTrait { ) -> Tensor { functional::conv::conv(X, W, B, auto_pad, dilations, group, kernel_shape, pads, strides) } + + fn max_pool( + X: @Tensor, + auto_pad: Option, + ceil_mode: Option, + dilations: Option>, + kernel_shape: Span, + pads: Option>, + storage_order: Option, + strides: Option>, + output_len: usize, + ) -> (Tensor, 
Option>) { + panic(array!['not supported!']) + } + fn deform_conv( + X: @Tensor, + W: @Tensor, + offset: @Tensor, + B: Option>, + mask: Option>, + dilations: Option>, + group: Option, + kernel_shape: Option>, + offset_group: Option, + pads: Option>, + strides: Option>, + ) -> Tensor { + panic(array!['not supported!']) + } } From f0e72877b71b4fb06a7913ecfc4fb7a584141314 Mon Sep 17 00:00:00 2001 From: raphaelDkhn Date: Mon, 22 Apr 2024 09:35:49 +0200 Subject: [PATCH 68/68] fix col2im and conv_transpose --- src/operators/nn/functional/col2im.cairo | 33 --- .../nn/functional/conv_transpose.cairo | 189 ------------------ 2 files changed, 222 deletions(-) diff --git a/src/operators/nn/functional/col2im.cairo b/src/operators/nn/functional/col2im.cairo index f0cf60501..05f229863 100644 --- a/src/operators/nn/functional/col2im.cairo +++ b/src/operators/nn/functional/col2im.cairo @@ -268,36 +268,3 @@ fn get_indices(index: usize, shape: Span,) -> Array { new_res } - -fn is_out(ind: Span, shape: Span,) -> bool { - let mut n = 0; - let is_out = loop { - if n == ind.len() { - break false; - } - let s = *shape.at(n); - let i = *ind.at(n); - if i < 0 { - break true; - } - if i >= s { - break true; - } - n += 1; - }; - - is_out -} - -fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize -) -> T { - let mut i = start; - let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); - i += 1; - }; - - prod -} diff --git a/src/operators/nn/functional/conv_transpose.cairo b/src/operators/nn/functional/conv_transpose.cairo index 7c2c8c037..04e93fea5 100644 --- a/src/operators/nn/functional/conv_transpose.cairo +++ b/src/operators/nn/functional/conv_transpose.cairo @@ -486,192 +486,3 @@ fn conv_transpose< TensorTrait::new(shape.span(), final.span()) } - -fn get_image, +Copy>(self: @Tensor, row: usize) -> Span { - assert((*self).shape.len() == 2, 'Expected a 2D tensor'); - - let row_length = *self.shape[1]; - let start = row * row_length; - 
- (*self).data.slice(start, row_length) -} - -fn col2im_naive_implementation< - T, MAG, +TensorTrait, +NumberTrait, +Copy, +Drop, +Add, ->( - data: @Tensor, - image_shape: Span, - kernel_shape: Span, - dilations: Span, - pads: Span, - strides: Span, -) -> NullableVec { - let n_dims = pads.len() / 2; - - col2im_shape_check(data, image_shape, kernel_shape, dilations, pads, strides); - - let mut dim_col: Array = array![]; - let mut i = 0; - while i != n_dims { - dim_col - .append( - (*image_shape.at(i) - + (*pads.at(i) + *pads.at(i + n_dims)) - - (*dilations.at(i) * (*kernel_shape.at(i) - 1) + 1)) - / *strides.at(i) - + 1 - ); - - i += 1; - }; - - let dim_col = dim_col.span(); - - let stride_img = stride(image_shape); - - let mut data_im = NullableVecImpl::new(); - data_im.set(*image_shape.at(0) * *stride_img.at(0) - 1, NumberTrait::zero()); - - let kernel_size = prod(kernel_shape, 0); - let col_size = prod(dim_col, 0); - let mut c_col = 0; - while c_col != kernel_size { - let offset = get_indices(c_col, kernel_shape).span(); - - let mut col = 0; - while col != col_size { - let ind_col = get_indices(col, dim_col).span(); - let mut ind_im: Array = array![]; - let mut i = 0; - while i != n_dims { - if (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)) < *pads - .at(i) { - let neg_index = *pads.at(i) - - (*ind_col.at(i) * *strides.at(i) + *offset.at(i) * *dilations.at(i)); - ind_im.append(*image_shape.at(i) + neg_index); - } else { - ind_im - .append( - *ind_col.at(i) * *strides.at(i) - + *offset.at(i) * *dilations.at(i) - - *pads.at(i) - ); - } - - i += 1; - }; - - let ind_im = ind_im.span(); - if !is_out(ind_im, image_shape) { - let mut index = 0; - let mut i = 0; - while i != image_shape.len() { - index += *stride_img.at(i) * *ind_im.at(i); - i += 1; - }; - - data_im.set(index, data_im.at(index) + *(*data).data.at(c_col * col_size + col)); - } - - col += 1; - }; - - c_col += 1; - }; - - data_im -} - -fn col2im_shape_check, +Copy, +Drop,>( - X: 
@Tensor, - output_shape: Span, - kernel_shape: Span, - dilations: Span, - pads: Span, - strides: Span, -) { - let n_input_plane = *(*X).shape.at(0); - - let kernel_size = prod(kernel_shape, 0); - - assert(n_input_plane % kernel_size == 0, 'wrong input dimension'); - - let input_length = *(*X).shape.at(1); - let n_dims = output_shape.len(); - let mut n_blocks: Array = array![]; - - let mut i = 0; - while i != n_dims { - n_blocks - .append( - (*output_shape.at(i) - + (*pads.at(i) + *pads.at(i + n_dims)) - - *dilations.at(i) * (*kernel_shape.at(i) - 1) - - 1) - / *strides.at(i) - + 1 - ); - i += 1; - }; - - let block_size = prod(n_blocks.span(), 0); - - assert(input_length == block_size, 'input_length != block_size'); -} - - -fn get_indices(index: usize, shape: Span,) -> Array { - let mut i = index; - let mut res: Array = array![]; - let mut k = shape.len() - 1; - while k != 0 { - let m = i % *shape.at(k); - res.append(m); - i -= m; - i /= *shape.at(k); - k -= 1; - }; - - let mut new_res: Array = array![]; - new_res.append(i); - let mut i = shape.len() - 1; - while i != 0 { - new_res.append(*res.at(i - 1)); - i -= 1; - }; - - new_res -} - -fn is_out(ind: Span, shape: Span,) -> bool { - let mut n = 0; - let is_out = loop { - if n == ind.len() { - break false; - } - let s = *shape.at(n); - let i = *ind.at(n); - if i < 0 { - break true; - } - if i >= s { - break true; - } - n += 1; - }; - - is_out -} - -fn prod, +Copy, +NumberTrait, +TensorTrait, +Mul,>( - pA: Span, start: usize -) -> T { - let mut i = start; - let mut prod = NumberTrait::one(); - while i != pA.len() { - prod = prod * (*pA.at(i)); - i += 1; - }; - - prod -}