From a4c38335dcecca850128ace405eec80ce74b7ef2 Mon Sep 17 00:00:00 2001
From: Roman Bredehoft
Date: Tue, 6 Feb 2024 15:54:01 +0100
Subject: [PATCH] fix: add support to AvgPool's missing parameters

---
 src/concrete/ml/onnx/ops_impl.py              | 60 ++++++++++++------
 src/concrete/ml/quantization/quantized_ops.py | 29 +++++++--
 tests/quantization/test_quantized_ops.py      | 61 ++++++++++++++++++-
 3 files changed, 127 insertions(+), 23 deletions(-)

diff --git a/src/concrete/ml/onnx/ops_impl.py b/src/concrete/ml/onnx/ops_impl.py
index d2d5f98bc6..56b8524123 100644
--- a/src/concrete/ml/onnx/ops_impl.py
+++ b/src/concrete/ml/onnx/ops_impl.py
@@ -1337,10 +1337,12 @@ def numpy_conv(
 def numpy_avgpool(
     x: numpy.ndarray,
     *,
-    ceil_mode: int,
     kernel_shape: Tuple[int, ...],
-    pads: Tuple[int, ...] = None,
-    strides: Tuple[int, ...] = None,
+    auto_pad: str = "NOTSET",
+    ceil_mode: int = 0,
+    count_include_pad: int = 1,
+    pads: Optional[Tuple[int, ...]] = None,
+    strides: Optional[Tuple[int, ...]] = None,
 ) -> Tuple[numpy.ndarray]:
     """Compute Average Pooling using Torch.
@@ -1349,29 +1351,53 @@ def numpy_avgpool(
     See: https://github.com/onnx/onnx/blob/main/docs/Operators.md#AveragePool
 
     Args:
-        x (numpy.ndarray): input data (many dtypes are supported). Shape is N x C x H x W for 2d
-        ceil_mode (int): ONNX rounding parameter, expected 0 (torch style dimension computation)
-        kernel_shape (Tuple[int, ...]): shape of the kernel. Should have 2 elements for 2d conv
-        pads (Tuple[int, ...]): padding in ONNX format (begin, end) on each axis
-        strides (Tuple[int, ...]): stride of the convolution on each axis
+        x (numpy.ndarray): Input data of shape (N, C, H, W), as only 2D inputs are currently
+            supported.
+        kernel_shape (Tuple[int, ...]): The size of the kernel along each axis. Currently, only 2D
+            kernels are supported.
+        auto_pad (str): Only the default "NOTSET" value is currently supported, which means
+            explicit padding is used.
+        ceil_mode (int): Whether to use ONNX's ceil (1) or floor (0, the default) to compute the
+            output shape.
+        count_include_pad (int): Whether to include pad pixels when calculating values for the
+            edges. Currently, setting this parameter to 0 is not supported in Concrete ML.
+        pads (Tuple[int, ...]): Padding for the beginning and ending along each spatial axis.
+            Expected format is [x1_begin, x2_begin...x1_end, x2_end, ...] where xi_begin (resp.
+            xi_end) is the number of pixels added at the beginning (resp. end) of axis `i`.
+        strides (Tuple[int, ...]): Stride along each spatial axis. If not present, the stride
+            defaults to 1 along each spatial axis.
 
     Returns:
         res (numpy.ndarray): a tensor of size (N x InChannels x OutHeight x OutWidth).
             See https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html
-
-    Raises:
-        AssertionError: if the pooling arguments are wrong
     """
-    assert_true(len(kernel_shape) == 2, "The average pool operator currently supports only 2-d")
+    assert_true(
+        auto_pad == "NOTSET",
+        "The 'auto_pad' parameter is not supported. Please keep the default 'NOTSET' value and "
+        "provide explicit padding.",
+    )
+
+    assert_true(len(kernel_shape) == 2, "The Average Pool operator currently supports only 2d.")
 
-    # For mypy
-    assert pads is None or len(pads) == 4
+    assert_true(
+        count_include_pad == 1,
+        "Pad pixels must be included when calculating values on the edges. Please set "
+        "'count_include_pad' to 1.",
+    )
 
-    # For mypy
-    assert len(kernel_shape) == 2
+    assert_true(
+        strides is None or len(kernel_shape) == len(strides),
+        "The Average Pool operator requires the number of strides to be the same as the number of "
+        "kernel dimensions.",
+    )
 
-    assert strides is None or len(strides) == 2
+    assert_true(
+        pads is None or len(pads) == 2 * len(kernel_shape),
+        "The Average Pool operator in Concrete ML requires padding to be specified as "
+        " (pad_left_dim1, pad_right_dim1, pad_left_dim2, pad_right_dim2, ...), following ONNX"
+        " standard.",
+    )
 
     # Use default values if the ONNX did not set these parameters
     pads = (0, 0, 0, 0) if pads is None else pads
diff --git a/src/concrete/ml/quantization/quantized_ops.py b/src/concrete/ml/quantization/quantized_ops.py
index 09427d9416..48547ac84c 100644
--- a/src/concrete/ml/quantization/quantized_ops.py
+++ b/src/concrete/ml/quantization/quantized_ops.py
@@ -1043,27 +1043,46 @@ def __init__(
         )
 
         # Get the ONNX parameters
-        self.ceil_mode = attrs.get("ceil_mode", None)
+        self.ceil_mode = attrs.get("ceil_mode", 0)
+        self.auto_pad = attrs.get("auto_pad", "NOTSET")
         self.kernel_shape = attrs.get("kernel_shape", None)
+
+        assert_true(self.kernel_shape is not None, "Setting parameter 'kernel_shape' is required.")
+
+        self.count_include_pad = attrs.get("count_include_pad", 1)
         self.pads = attrs.get("pads", tuple([0] * 2 * (len(self.kernel_shape) - 2)))
         self.dilations = attrs.get("dilations", tuple([1] * len(self.kernel_shape)))
         self.strides = attrs.get("strides", tuple([1] * len(self.kernel_shape)))
 
         # Validate the parameters
+        assert_true(
+            self.auto_pad == "NOTSET",
+            "The 'auto_pad' parameter is not supported. Please keep the default 'NOTSET' value "
+            "and provide explicit padding.",
+        )
+
         assert_true(
             len(self.kernel_shape) == 2,
-            "The Average Pool operator currently supports only 2d",
+            "The Average Pool operator currently supports only 2d.",
         )
+
+        assert_true(
+            self.count_include_pad == 1,
+            "Pad pixels must be included when calculating values on the edges. Please set "
+            "'count_include_pad' to 1.",
+        )
+
         assert_true(
             len(self.kernel_shape) == len(self.strides),
-            "The Average Pool operator requires the number of strides to "
-            "be the same as the number of kernel dimensions",
+            "The Average Pool operator requires the number of strides to be the same as the number "
+            "of kernel dimensions.",
         )
+
         assert_true(
             len(self.pads) == 2 * len(self.kernel_shape),
             "The Average Pool operator in Concrete ML requires padding to be specified as "
             " (pad_left_dim1, pad_right_dim1, pad_left_dim2, pad_right_dim2, ...), following ONNX"
-            " standard",
+            " standard.",
         )
 
         self.kernel: Union[numpy.ndarray, None] = None
diff --git a/tests/quantization/test_quantized_ops.py b/tests/quantization/test_quantized_ops.py
index b592fea9a8..c502a68f34 100644
--- a/tests/quantization/test_quantized_ops.py
+++ b/tests/quantization/test_quantized_ops.py
@@ -905,6 +905,65 @@ def test_quantized_avg_pool(params, n_bits, is_signed, check_r2_score, check_flo
     )
 
 
+def test_quantized_avg_pool_args():
+    """Check that unsupported parameters for AvgPool properly raise errors."""
+    n_bits = 2
+
+    with pytest.raises(AssertionError, match=r"Setting parameter 'kernel_shape' is required."):
+        QuantizedAvgPool(
+            n_bits,
+            OP_DEBUG_NAME + "QuantizedAvgPool",
+        )
+
+    with pytest.raises(AssertionError, match=r"The 'auto_pad' parameter is not supported.*"):
+        QuantizedAvgPool(
+            n_bits,
+            OP_DEBUG_NAME + "QuantizedAvgPool",
+            kernel_shape=(1, 1),
+            auto_pad="SAME_UPPER",
+        )
+
+    with pytest.raises(
+        AssertionError, match=r"The Average Pool operator currently supports only 2d"
+    ):
+        QuantizedAvgPool(
+            n_bits,
+            OP_DEBUG_NAME + "QuantizedAvgPool",
+            kernel_shape=(1,),
+        )
+
+    with pytest.raises(
+        AssertionError, match=r"Pad pixels must be included when calculating values on the edges.*"
+    ):
+        QuantizedAvgPool(
+            n_bits,
+            OP_DEBUG_NAME + "QuantizedAvgPool",
+            kernel_shape=(1, 1),
+            count_include_pad=0,
+        )
+
+    with pytest.raises(
+        AssertionError,
+        match=r"The Average Pool operator requires the number of strides to be the same.*",
+    ):
+        QuantizedAvgPool(
+            n_bits,
+            OP_DEBUG_NAME + "QuantizedAvgPool",
+            kernel_shape=(1, 1),
+            strides=(1,),
+        )
+
+    with pytest.raises(
+        AssertionError, match=r"The Average Pool operator in Concrete ML requires padding.*"
+    ):
+        QuantizedAvgPool(
+            n_bits,
+            OP_DEBUG_NAME + "QuantizedAvgPool",
+            kernel_shape=(1, 1),
+            pads=(0, 0),
+        )
+
+
 @pytest.mark.parametrize("n_bits", [16])
 @pytest.mark.parametrize(
     "params",
@@ -1038,7 +1097,7 @@ def test_quantized_max_pool(params, n_bits, is_signed, check_r2_score, check_flo
 
 
 def test_quantized_conv_args():
-    """Check that conv arguments are validated"""
+    """Check that conv arguments are validated."""
     n_bits = 2
 
     weights = numpy.random.uniform(size=(10, 1, 16, 16)) * 0.2