
Commit c5b5685

impls
lancenonce committed Apr 12, 2024
1 parent 4f0f9b6 commit c5b5685
Showing 17 changed files with 157 additions and 5 deletions.
50 changes: 49 additions & 1 deletion src/operators/tensor/core.cairo
@@ -111,7 +111,6 @@ impl TensorSerde<T, impl TSerde: Serde<T>, impl TDrop: Drop<T>> of Serde<Tensor<
/// pow - Pow takes input data (Tensor) and exponent Tensor, and produces one output data (Tensor) where the function f(x) = x^exponent, is applied to the data tensor elementwise.
/// binarizer - Maps the values of a tensor element-wise to 0 or 1 based on the comparison against a threshold value.
/// array_feature_extractor - Selects elements of the input tensor based on the indices passed applied to the last tensor axis.
/// reduce_min - Computes the min of the input tensor's elements along the provided axes.
/// is_nan - Returns which elements of the input are NaN.
/// is_inf - Maps infinity to true and other values to false.
/// not - Computes the logical negation of all elements in the input tensor.
@@ -4525,6 +4524,55 @@ trait TensorTrait<T> {
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
/// ## tensor.reduce_max
///
/// ```rust
/// fn reduce_max(self: @Tensor<T>, axes: Option<Span<usize>>, keepdims: Option<bool>, noop_with_empty_axes: Option<bool>) -> Tensor<T>;
/// ```
///
/// Computes the max of the input tensor's elements along the provided axes.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The input tensor.
/// * `axes`(`Option<Span<usize>>`) - Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an identity op when 'noop_with_empty_axes' is true.
/// * `keepdims`(`Option<bool>`) - Keep the reduced dimensions or not; default is true (keep the reduced dimensions).
/// * `noop_with_empty_axes`(`Option<bool>`) - Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When 'axes' is empty and this attribute is set to true, the input tensor will not be reduced, and the output tensor will be equivalent to the input tensor.
///
/// ## Panics
///
/// * Panics if a given axis is not in the range of the input tensor's dimensions.
///
/// ## Returns
///
/// A new `Tensor<T>` instance with the specified axes reduced by taking the maximum of their elements.
///
/// ## Examples
///
/// ```rust
/// use core::array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn reduce_max_example() -> Tensor<u32> {
///     let tensor = TensorTrait::<u32>::new(
///         shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
///     );
///
///     // We can call `reduce_max` function as follows.
///     return tensor
///         .reduce_max(
///             axes: Option::Some(array![1].span()),
///             keepdims: Option::None(()),
///             noop_with_empty_axes: Option::None(())
///         );
/// }
/// >>> [[2,3],[6,7]]
/// ```
///
fn reduce_max(
self: @Tensor<T>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<T>;
/// ## tensor.pow
///
/// ```rust
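The doc comment above exercises the default `keepdims` (true). As a complementary, hedged sketch, and assuming the ONNX-style semantics the doc comment describes, the same tensor can be reduced with `keepdims` explicitly set to false so the reduced axis is dropped; the function name `reduce_max_keepdims_false` below is illustrative, not part of this commit.

```rust
use core::array::{ArrayTrait, SpanTrait};

use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};

// Illustrative only: the 2x2x2 tensor from the doc example, reduced along
// axis 1 with `keepdims` set to false, so the reduced axis is dropped.
// Assuming ONNX-style semantics, the result has shape [2, 2] and data [2, 3, 6, 7].
fn reduce_max_keepdims_false() -> Tensor<u32> {
    let tensor = TensorTrait::<u32>::new(
        shape: array![2, 2, 2].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7].span(),
    );

    tensor
        .reduce_max(
            axes: Option::Some(array![1].span()),
            keepdims: Option::Some(false),
            noop_with_empty_axes: Option::None(())
        )
}
```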
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_bool.cairo
@@ -409,6 +409,15 @@ impl BoolTensor of TensorTrait<bool> {
panic(array!['not supported!'])
}

fn reduce_max(
self: @Tensor<bool>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<bool> {
panic(array!['not supported!'])
}

fn pow(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
}
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_complex64.cairo
@@ -432,6 +432,15 @@ impl Complex64Tensor of TensorTrait<complex64> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<complex64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<complex64> {
panic(array!['not supported!'])
}

fn reduce_min(
self: @Tensor<complex64>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_fp16x16.cairo
@@ -466,6 +466,15 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<FP16x16>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<FP16x16>,
axes: Option<Span<usize>>,
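Every numeric implementation in this commit forwards to the same `math::reduce_max::reduce_max` function, so calling the operator on a fixed-point tensor looks the same as the `u32` doc example. A hedged sketch for `FP16x16`, assuming the usual Orion imports (`FP16x16Tensor` from `orion::operators::tensor`, `FixedTrait` and `FP16x16` from `orion::numbers`):

```rust
use core::array::{ArrayTrait, SpanTrait};

use orion::operators::tensor::{TensorTrait, Tensor, FP16x16Tensor};
use orion::numbers::{FixedTrait, FP16x16};

// Illustrative only: a 2x2 fixed-point tensor reduced along axis 0.
fn reduce_max_fp16x16_example() -> Tensor<FP16x16> {
    let tensor = TensorTrait::<FP16x16>::new(
        shape: array![2, 2].span(),
        data: array![
            FixedTrait::new_unscaled(1, false),
            FixedTrait::new_unscaled(2, false),
            FixedTrait::new_unscaled(3, false),
            FixedTrait::new_unscaled(4, false)
        ].span(),
    );

    tensor
        .reduce_max(
            axes: Option::Some(array![0].span()),
            keepdims: Option::None(()),
            noop_with_empty_axes: Option::None(())
        )
}
```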
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_fp16x16wide.cairo
@@ -435,6 +435,15 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<FP16x16W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP16x16W> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<FP16x16W>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_fp32x32.cairo
@@ -463,6 +463,15 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<FP32x32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP32x32> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<FP32x32>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_fp64x64.cairo
@@ -463,6 +463,15 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<FP64x64>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP64x64> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<FP64x64>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_fp8x23.cairo
@@ -459,6 +459,15 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
panic(array!['not supported!'])
}

fn reduce_max(
self: @Tensor<FP8x23>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<FP8x23>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_fp8x23wide.cairo
@@ -418,6 +418,15 @@ impl FP8x23WTensor of TensorTrait<FP8x23W> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<FP8x23W>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<FP8x23W> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<FP8x23W>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_i32.cairo
@@ -452,6 +452,15 @@ impl I32Tensor of TensorTrait<i32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<i32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i32> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<i32>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_i8.cairo
@@ -455,6 +455,15 @@ impl I8Tensor of TensorTrait<i8> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<i8>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<i8> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<i8>,
axes: Option<Span<usize>>,
9 changes: 9 additions & 0 deletions src/operators/tensor/implementations/tensor_u32.cairo
@@ -399,6 +399,15 @@ impl U32Tensor of TensorTrait<u32> {
math::reduce_mean::reduce_mean(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_max(
self: @Tensor<u32>,
axes: Option<Span<usize>>,
keepdims: Option<bool>,
noop_with_empty_axes: Option<bool>
) -> Tensor<u32> {
math::reduce_max::reduce_max(self, axes, keepdims, noop_with_empty_axes)
}

fn reduce_min(
self: @Tensor<u32>,
axes: Option<Span<usize>>,
1 change: 1 addition & 0 deletions src/operators/tensor/math.cairo
@@ -49,6 +49,7 @@ mod bitwise_and;
mod bitwise_xor;
mod bitwise_or;
mod gather_elements;
mod reduce_max;
mod reduce_min;
mod shrink;
mod reduce_mean;
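The body of the `reduce_max` module registered above is not shown in this diff. As a rough, hypothetical sketch of the comparison loop such a module needs at its core (the helper name `span_max`, its signature, and its trait bounds are assumptions; the real `math::reduce_max::reduce_max` additionally has to handle `axes`, `keepdims`, and `noop_with_empty_axes` over multi-dimensional strides):

```rust
use core::array::{ArrayTrait, SpanTrait};
use core::option::OptionTrait;

// Hypothetical helper, not the actual contents of reduce_max.cairo:
// returns the maximum element of a non-empty span of comparable values.
fn span_max<T, impl TPartialOrd: PartialOrd<T>, impl TCopy: Copy<T>, impl TDrop: Drop<T>>(
    mut data: Span<T>
) -> T {
    // Panics if the span is empty.
    let mut max = *data.pop_front().unwrap();
    loop {
        match data.pop_front() {
            Option::Some(item) => {
                // Desnap the element and keep the larger value.
                if *item > max {
                    max = *item;
                };
            },
            Option::None(_) => { break (); },
        };
    };
    max
}
```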
3 changes: 2 additions & 1 deletion tests/nodes/reduce_max_fp16x16_2D_axis_1.cairo
@@ -14,7 +14,8 @@ fn test_reduce_max_fp16x16_2D_axis_1() {
let input_0 = input_0::input_0();
let z_0 = output_0::output_0();

let y_0 = input_0.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));
let y_0 = input_0
.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));

assert_eq(y_0, z_0);
}
3 changes: 2 additions & 1 deletion tests/nodes/reduce_max_i32_2D_axis_1.cairo
@@ -14,7 +14,8 @@ fn test_reduce_max_i32_2D_axis_1() {
let input_0 = input_0::input_0();
let z_0 = output_0::output_0();

let y_0 = input_0.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));
let y_0 = input_0
.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));

assert_eq(y_0, z_0);
}
3 changes: 2 additions & 1 deletion tests/nodes/reduce_max_i8_2D_axis_1.cairo
@@ -14,7 +14,8 @@ fn test_reduce_max_i8_2D_axis_1() {
let input_0 = input_0::input_0();
let z_0 = output_0::output_0();

let y_0 = input_0.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));
let y_0 = input_0
.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));

assert_eq(y_0, z_0);
}
3 changes: 2 additions & 1 deletion tests/nodes/reduce_max_u32_2D_axis_1.cairo
@@ -14,7 +14,8 @@ fn test_reduce_max_u32_2D_axis_1() {
let input_0 = input_0::input_0();
let z_0 = output_0::output_0();

let y_0 = input_0.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));
let y_0 = input_0
.reduce_max(Option::Some(array![1].span()), Option::None(()), Option::None(()));

assert_eq(y_0, z_0);
}
