Merge pull request #223 from 0xd3bs/feat/add-squeeze
feat: add squeeze
raphaelDkhn authored Oct 2, 2023
2 parents d925f03 + bffe988 commit 81f8588
Showing 31 changed files with 631 additions and 40 deletions.
1 change: 1 addition & 0 deletions docs/SUMMARY.md
@@ -80,6 +80,7 @@
* [tensor.quantize\_linear](framework/operators/tensor/tensor.quantize\_linear.md)
* [tensor.dequantize\_linear](framework/operators/tensor/tensor.dequantize\_linear.md)
* [tensor.nonzero](framework/operators/tensor/tensor.nonzero.md)
* [tensor.squeeze](framework/operators/tensor/tensor.squeeze.md)
* [tensor.unsqueeze](framework/operators/tensor/tensor.unsqueeze.md)
* [Neural Network](framework/operators/neural-network/README.md)
* [nn.relu](framework/operators/neural-network/nn.relu.md)
3 changes: 2 additions & 1 deletion docs/framework/compatibility.md
@@ -53,6 +53,7 @@ You can see below the list of currently supported ONNX Operators:
| [QuantizeLinear](operators/tensor/tensor.quantize\_linear.md) | :white\_check\_mark: |
| [DequantizeLinear](operators/tensor/tensor.dequantize\_linear.md) | :white\_check\_mark: |
| [Nonzero](operators/tensor/tensor.nonzero.md) | :white\_check\_mark: |
| [Squeeze](operators/tensor/tensor.squeeze.md) | :white\_check\_mark: |
| [Unsqueeze](operators/tensor/tensor.unsqueeze.md) | :white\_check\_mark: |

Current Operators support: **48/156 (30%)**
Current Operators support: **49/156 (31%)**
1 change: 1 addition & 0 deletions docs/framework/operators/tensor/README.md
@@ -78,6 +78,7 @@ use orion::operators::tensor::TensorTrait;
| [`tensor.dequantize_linear`](tensor.dequantize\_linear.md) | Dequantizes an i8 Tensor using linear dequantization. |
| [`tensor.gather`](tensor.gather.md) | Gather entries of the axis dimension of data. |
| [`tensor.nonzero`](tensor.nonzero.md) | Produces indices of the elements that are non-zero (in row-major order - by dimension). |
| [`tensor.squeeze`](tensor.squeeze.md) | Removes dimensions of size 1 from the shape of a tensor. |
| [`tensor.unsqueeze`](tensor.unsqueeze.md) | Inserts single-dimensional entries to the shape of an input tensor. |

## Arithmetic Operations
35 changes: 35 additions & 0 deletions docs/framework/operators/tensor/tensor.squeeze.md
@@ -0,0 +1,35 @@
# tensor.squeeze

```rust
fn squeeze(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T>;
```

Removes dimensions of size 1 from the shape of a tensor.

## Args

* `self`(`@Tensor<T>`) - Tensor of data to squeeze.
* `axes`(`Option<Span<i32>>`) - List of integers indicating the dimensions to squeeze; negative values count from the end of the shape. If not provided, all dimensions of size 1 are removed.

## Returns

A new `Tensor<T>`: a reshaped tensor with the same data as the input.

## Example

```rust
use array::{ArrayTrait, SpanTrait};

use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};

fn squeeze_example() -> Tensor<u32> {
    let tensor = TensorTrait::<u32>::new(
        shape: array![1, 2, 1, 2, 1].span(),
        data: array![1, 1, 1, 1].span(),
    );

    return tensor.squeeze(axes: Option::None(()));
}
>>> [[1 1]
[1 1]]
```
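For readers who think in numpy terms, `tensor.squeeze` behaves like `np.squeeze`: with `axes: Option::None` every dimension of size 1 is removed, while an explicit axes span removes only the listed dimensions, each of which must have size 1. A minimal numpy sketch of the same example (illustrative only, not part of this commit):

```python
import numpy as np

x = np.ones((1, 2, 1, 2, 1), dtype=np.uint32)

# axes: Option::None -> drop every size-1 dimension.
print(np.squeeze(x).shape)               # (2, 2)

# Explicit axes [0, 2] -> only those dimensions are dropped,
# so the trailing size-1 axis survives.
print(np.squeeze(x, axis=(0, 2)).shape)  # (2, 2, 1)
```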
84 changes: 84 additions & 0 deletions nodegen/node/squeeze.py
@@ -0,0 +1,84 @@
import numpy as np
from nodegen.node import RunAll
from ..helpers import make_node, make_test, to_fp, Tensor, Dtype, FixedImpl


class Squeeze(RunAll):
    @staticmethod
    def squeeze_i8():
        def squeeze():
            x = np.ones((1, 2, 1, 2, 1), dtype=np.int8)
            y = np.ones((2, 2, 1), dtype=np.int8)

            x = Tensor(Dtype.I8, x.shape, x.flatten())
            y = Tensor(Dtype.I8, y.shape, y.flatten())

            name = "squeeze_i8"
            make_node([x], [y], name)
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![i32 { mag: 0, sign: false }, i32 { mag: 2, sign: false }].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_i32():
        def squeeze():
            x = np.ones((1, 2, 1, 2, 1), dtype=np.int32)
            y = np.ones((2, 2, 1), dtype=np.int32)

            x = Tensor(Dtype.I32, x.shape, x.flatten())
            y = Tensor(Dtype.I32, y.shape, y.flatten())

            name = "squeeze_i32"
            make_node([x], [y], name)
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![i32 { mag: 0, sign: false }, i32 { mag: 2, sign: false }].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_u32():
        def squeeze():
            x = np.ones((1, 2, 1, 2, 1), dtype=np.uint32)
            y = np.ones((2, 2, 1), dtype=np.uint32)

            x = Tensor(Dtype.U32, x.shape, x.flatten())
            y = Tensor(Dtype.U32, y.shape, y.flatten())

            name = "squeeze_u32"
            make_node([x], [y], name)
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![i32 { mag: 0, sign: false }, i32 { mag: 2, sign: false }].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_fP16x16():
        def squeeze():
            x = to_fp(np.random.randint(0, 255, (1, 2, 1, 2, 1)).astype(np.int64), FixedImpl.FP16x16)
            # Expected output: the same data with axes 0 and 2 squeezed out.
            y = x.reshape(2, 2, 1)

            x = Tensor(Dtype.FP16x16, x.shape, x.flatten())
            y = Tensor(Dtype.FP16x16, y.shape, y.flatten())

            name = "squeeze_fP16x16"
            make_node([x], [y], name)
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![i32 { mag: 0, sign: false }, i32 { mag: 2, sign: false }].span()))", name)
        squeeze()

    @staticmethod
    def squeeze_fP8x23():
        def squeeze():
            x = to_fp(np.random.randint(0, 255, (1, 2, 1, 2, 1)).astype(np.int64), FixedImpl.FP8x23)
            # Expected output: the same data with axes 0 and 2 squeezed out.
            y = x.reshape(2, 2, 1)

            x = Tensor(Dtype.FP8x23, x.shape, x.flatten())
            y = Tensor(Dtype.FP8x23, y.shape, y.flatten())

            name = "squeeze_fP8x23"
            make_node([x], [y], name)
            make_test(
                [x], y, "input_0.squeeze(Option::Some(array![i32 { mag: 0, sign: false }, i32 { mag: 2, sign: false }].span()))", name)
        squeeze()
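As a quick sanity check outside the generator (illustrative only), the hard-coded integer fixtures above agree with numpy's squeeze over axes 0 and 2:

```python
import numpy as np

x = np.ones((1, 2, 1, 2, 1), dtype=np.uint32)
y = np.squeeze(x, axis=(0, 2))  # the axes encoded as i32 { mag: 0/2, sign: false } above

assert y.shape == (2, 2, 1)
assert np.array_equal(y, np.ones((2, 2, 1), dtype=np.uint32))
```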
112 changes: 111 additions & 1 deletion src/operators/tensor/core.cairo
@@ -5,7 +5,7 @@ use option::OptionTrait;
use alexandria_data_structures::array_ext::{SpanTraitExt};

use orion::operators::tensor::helpers::{len_from_shape, check_shape};
use orion::numbers::{i8, NumberTrait};
use orion::numbers::{i8, i32, NumberTrait};

#[derive(Copy, Drop)]
struct Tensor<T> {
@@ -74,6 +74,7 @@ impl TensorSerde<T, impl TSerde: Serde<T>, impl TDrop: Drop<T>> of Serde<Tensor<
/// dequantize_linear - Dequantizes an i8 Tensor using linear dequantization.
/// gather - Gather entries of the axis dimension of data.
/// nonzero - Produces indices of the elements that are non-zero (in row-major order - by dimension).
/// squeeze - Removes dimensions of size 1 from the shape of a tensor.
/// unsqueeze - Inserts single-dimensional entries to the shape of an input tensor.
///
trait TensorTrait<T> {
@@ -2556,6 +2557,46 @@ trait TensorTrait<T> {
/// ```
///
fn unsqueeze(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T>;
/// # tensor.squeeze
///
/// ```rust
/// fn squeeze(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T>;
/// ```
///
/// Removes dimensions of size 1 from the shape of a tensor.
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - Tensor of data to squeeze.
/// * `axes`(`Option<Span<i32>>`) - List of integers indicating the dimensions to squeeze; negative values count from the end of the shape. If not provided, all dimensions of size 1 are removed.
///
/// ## Returns
///
/// A new `Tensor<T>`: a reshaped tensor with the same data as the input.
///
/// ## Example
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn squeeze_example() -> Tensor<u32> {
///     let tensor = TensorTrait::<u32>::new(
///         shape: array![1, 2, 1, 2, 1].span(),
///         data: array![1, 1, 1, 1].span(),
///     );
///
///     return tensor.squeeze(axes: Option::None(()));
/// }
/// >>> [[1 1]
/// [1 1]]
/// ```
///
fn squeeze(
self: @Tensor<T>,
axes: Option<Span<i32>>
) -> Tensor<T>;
}


@@ -2930,6 +2971,75 @@ fn nonzero<T, MAG, impl TTensor: TensorTrait<T>, impl TPartialEq: PartialEq<T>,
return Tensor::<usize> {shape: array![(*self.shape).len(), stop_k + 1].span(), data: output_data.span()};
}

/// Cf: TensorTrait::squeeze docstring
fn squeeze<T>(self: @Tensor<T>, axes: Option<Span<i32>>) -> Tensor<T> {
    let target_shape = match axes {
        Option::Some(mut axes) => {
            let mut axis_squeezed = 0;
            let mut shape = *self.shape;
            loop {
                match axes.pop_front() {
                    Option::Some(axis) => {
                        let mut reshape: Array<usize> = ArrayTrait::new();
                        let mut index = 0_usize;
                        let axis = if *axis.sign {
                            assert(*axis.mag <= (*self.shape).len(), 'axis out of accepted range');
                            (*self.shape).len() - *axis.mag
                        } else {
                            assert(*axis.mag < (*self.shape).len(), 'axis out of accepted range');
                            *axis.mag
                        };

                        loop {
                            match shape.pop_front() {
                                Option::Some(shape) => {
                                    let squeezed = if axis >= axis_squeezed {
                                        axis - axis_squeezed
                                    } else {
                                        axis
                                    };
                                    if index == squeezed {
                                        assert(*shape == 1, 'shape entry not equal to one');
                                        axis_squeezed += 1;
                                    } else {
                                        reshape.append(*shape);
                                    }
                                },
                                Option::None(_) => {
                                    break;
                                },
                            };
                            index += 1;
                        };
                        shape = reshape.span();
                    },
                    Option::None(_) => {
                        break shape;
                    },
                };
            }
        },
        Option::None(_) => {
            let mut reshape: Array<usize> = ArrayTrait::new();
            let mut shape = *self.shape;
            loop {
                match shape.pop_front() {
                    Option::Some(shape) => {
                        if *shape != 1 {
                            reshape.append(*shape);
                        }
                    },
                    Option::None(_) => {
                        break reshape.span();
                    },
                };
            }
        },
    };

    return Tensor::<T> { shape: target_shape, data: *self.data };
}
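The `Option::Some` branch above removes the requested dimensions one at a time, rebuilding the shape on each pass and shifting each requested axis left by the number of dimensions already removed (`axis_squeezed`); negative axes are resolved against the original rank. A Python sketch of that shape computation, using a hypothetical `squeeze_shape` helper for illustration (not part of the commit):

```python
def squeeze_shape(shape, axes=None):
    # Mirrors core::squeeze's shape handling: None drops every size-1
    # dimension; otherwise only the listed axes are dropped, and each
    # must have size 1.
    shape = list(shape)
    if axes is None:
        return [d for d in shape if d != 1]

    rank = len(shape)  # negative axes are resolved against the original rank
    removed = 0        # counterpart of axis_squeezed
    for axis in axes:
        axis = rank + axis if axis < 0 else axis
        target = axis - removed if axis >= removed else axis
        assert shape[target] == 1, "shape entry not equal to one"
        del shape[target]
        removed += 1
    return shape


assert squeeze_shape((1, 2, 1, 2, 1)) == [2, 2]
assert squeeze_shape((1, 2, 1, 2, 1), axes=[0, 2]) == [2, 2, 1]
```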
/// Cf: TensorTrait::unsqueeze docstring
fn unsqueeze<T>(self: @Tensor<T>, axes: Span<usize>) -> Tensor<T> {
let dedupped_array = axes.dedup();
11 changes: 6 additions & 5 deletions src/operators/tensor/implementations/tensor_fp16x16.cairo
@@ -8,7 +8,7 @@ use orion::operators::tensor::core::{
new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core};
use orion::numbers::{i8, NumberTrait, FP16x16};
use orion::numbers::{i8, i32, NumberTrait, FP16x16};
use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_u32::U32Tensor};

impl FP16x16Tensor of TensorTrait<FP16x16> {
@@ -214,10 +214,11 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
core::nonzero(self)
}

fn unsqueeze(
self: @Tensor<FP16x16>,
axes: Span<usize>
) -> Tensor<FP16x16> {
fn squeeze(self: @Tensor<FP16x16>, axes: Option<Span<i32>>) -> Tensor<FP16x16> {
core::squeeze(self, axes)
}

fn unsqueeze(self: @Tensor<FP16x16>, axes: Span<usize>) -> Tensor<FP16x16> {
core::unsqueeze(self, axes)
}
}
13 changes: 7 additions & 6 deletions src/operators/tensor/implementations/tensor_fp32x32.cairo
@@ -8,7 +8,7 @@ use orion::operators::tensor::core::{
new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core};
use orion::numbers::{i8, NumberTrait, FP32x32, FP32x32Impl};
use orion::numbers::{i8, i32, NumberTrait, FP32x32, FP32x32Impl};
use orion::numbers::fixed_point::implementations::fp32x32::core::ONE;
use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_u32::U32Tensor};

@@ -210,15 +210,16 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
) -> Tensor<FP32x32> {
math::gather::gather(self, indices, axis)
}

fn nonzero(self: @Tensor<FP32x32>) -> Tensor<usize> {
core::nonzero(self)
}

fn unsqueeze(
self: @Tensor<FP32x32>,
axes: Span<usize>
) -> Tensor<FP32x32> {
fn squeeze(self: @Tensor<FP32x32>, axes: Option<Span<i32>>) -> Tensor<FP32x32> {
core::squeeze(self, axes)
}

fn unsqueeze(self: @Tensor<FP32x32>, axes: Span<usize>) -> Tensor<FP32x32> {
core::unsqueeze(self, axes)
}
}
13 changes: 7 additions & 6 deletions src/operators/tensor/implementations/tensor_fp64x64.cairo
@@ -8,7 +8,7 @@ use orion::operators::tensor::core::{
new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core};
use orion::numbers::{i8, NumberTrait, FP64x64, FP64x64Impl};
use orion::numbers::{i8, i32, NumberTrait, FP64x64, FP64x64Impl};
use orion::numbers::fixed_point::implementations::fp64x64::core::ONE;
use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_u32::U32Tensor};

@@ -210,15 +210,16 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
) -> Tensor<FP64x64> {
math::gather::gather(self, indices, axis)
}

fn nonzero(self: @Tensor<FP64x64>) -> Tensor<usize> {
core::nonzero(self)
}

fn unsqueeze(
self: @Tensor<FP64x64>,
axes: Span<usize>
) -> Tensor<FP64x64> {
fn squeeze(self: @Tensor<FP64x64>, axes: Option<Span<i32>>) -> Tensor<FP64x64> {
core::squeeze(self, axes)
}

fn unsqueeze(self: @Tensor<FP64x64>, axes: Span<usize>) -> Tensor<FP64x64> {
core::unsqueeze(self, axes)
}
}
11 changes: 6 additions & 5 deletions src/operators/tensor/implementations/tensor_fp8x23.cairo
@@ -8,7 +8,7 @@ use orion::operators::tensor::core::{
new_tensor, stride, Tensor, TensorTrait, ravel_index, unravel_index, reshape, at_tensor,
};
use orion::operators::tensor::{math, linalg, quantization, core};
use orion::numbers::{i8, NumberTrait, FP8x23};
use orion::numbers::{i8, i32, NumberTrait, FP8x23};
use orion::operators::tensor::implementations::{tensor_i8::I8Tensor, tensor_u32::U32Tensor};

impl FP8x23Tensor of TensorTrait<FP8x23> {
@@ -214,10 +214,11 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
core::nonzero(self)
}

fn unsqueeze(
self: @Tensor<FP8x23>,
axes: Span<usize>
) -> Tensor<FP8x23> {
fn squeeze(self: @Tensor<FP8x23>, axes: Option<Span<i32>>) -> Tensor<FP8x23> {
core::squeeze(self, axes)
}

fn unsqueeze(self: @Tensor<FP8x23>, axes: Span<usize>) -> Tensor<FP8x23> {
core::unsqueeze(self, axes)
}
}