From fd444d50c75a8fdef4b87fa30a221d1a2a354875 Mon Sep 17 00:00:00 2001
From: irenab
Date: Mon, 3 Mar 2025 12:30:30 +0200
Subject: [PATCH] fix and enable torch bops tests

---
 .../feature_models/mixed_precision_bops_test.py  | 9 +++++----
 .../model_tests/test_feature_models_runner.py    | 9 ++++-----
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py
index 7f80e3ac1..eb129f488 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py
@@ -131,7 +131,7 @@ def compare(self, quantized_model, float_model, input_x=None, quantization_info=
         # Verify that some layers got bit-width smaller than 8 bits (so checking candidate index is not 0)
         self.unit_test.assertTrue(any(i > 0 for i in quantization_info.mixed_precision_cfg))
         # Verify final BOPs utilization
-        self.unit_test.assertTrue(quantization_info.final_resource_utilization.bops <= self.get_resource_utilization().bops)
+        self.unit_test.assertTrue(self.get_resource_utilization().is_satisfied_by(quantization_info.final_resource_utilization))
 
 
 class MixedPrecisionBopsBasicTest(BaseMixedPrecisionBopsTest):
@@ -198,7 +198,8 @@ def __init__(self, unit_test):
         super().__init__(unit_test)
 
     def get_resource_utilization(self):
-        return ResourceUtilization(activation_memory=1000, bops=3000000)  # should require some quantization to all layers
+        # act max cut 2bit = 2*(30*30*4)*2/8 = 1800
+        return ResourceUtilization(activation_memory=1800, bops=3000000)  # should require some quantization to all layers
 
 
 class MixedPrecisionBopsAndTotalMemoryUtilizationTest(MixedPrecisionBopsAllWeightsLayersTest):
@@ -206,7 +207,7 @@ def __init__(self, unit_test):
         super().__init__(unit_test)
 
     def get_resource_utilization(self):
-        return ResourceUtilization(total_memory=1100, bops=3000000)  # should require some quantization to all layers
+        return ResourceUtilization(total_memory=2000, bops=3000000)  # should require some quantization to all layers
 
 
 class MixedPrecisionBopsWeightsActivationUtilizationTest(MixedPrecisionBopsAllWeightsLayersTest):
@@ -214,7 +215,7 @@ def __init__(self, unit_test):
         super().__init__(unit_test)
 
     def get_resource_utilization(self):
-        return ResourceUtilization(weights_memory=150, activation_memory=1000, bops=3000000)  # should require some quantization to all layers
+        return ResourceUtilization(weights_memory=150, activation_memory=1800, bops=3000000)  # should require some quantization to all layers
 
 
 class MixedPrecisionBopsMultipleOutEdgesTest(BaseMixedPrecisionBopsTest):
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 208818613..89b7ca35e 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -624,11 +624,10 @@ def test_mixed_precision_bops_utilization(self):
         MixedPrecisionBopsAllWeightsLayersTest(self).run_test()
         MixedPrecisionWeightsOnlyBopsTest(self).run_test()
         MixedPrecisionActivationOnlyBopsTest(self).run_test()
-        # TODO: uncomment these tests when the issue of combined BOPs and other RU metrics is solved.
-        # MixedPrecisionBopsAndWeightsMemoryUtilizationTest(self).run_test()
-        # MixedPrecisionBopsAndActivationMemoryUtilizationTest(self).run_test()
-        # MixedPrecisionBopsAndTotalMemoryUtilizationTest(self).run_test()
-        # MixedPrecisionBopsWeightsActivationUtilizationTest(self).run_test()
+        MixedPrecisionBopsAndWeightsMemoryUtilizationTest(self).run_test()
+        MixedPrecisionBopsAndActivationMemoryUtilizationTest(self).run_test()
+        MixedPrecisionBopsAndTotalMemoryUtilizationTest(self).run_test()
+        MixedPrecisionBopsWeightsActivationUtilizationTest(self).run_test()
         MixedPrecisionBopsMultipleOutEdgesTest(self).run_test()
 
     def test_mixed_precision_distance_functions(self):
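
Note on the first hunk: the old assertion checked only the BOPs metric, so a run that met the BOPs target while exceeding a memory target would still pass. The `is_satisfied_by` call the patch switches to validates the final utilization against every constrained metric at once, which is what makes it safe to re-enable the combined BOPs-plus-memory tests. A minimal sketch of that semantics, assuming the field set used by these tests; the real method lives in MCT's ResourceUtilization and may differ in detail:

    from dataclasses import dataclass

    @dataclass
    class ResourceUtilization:
        # Field names match the tests above; treating an unset target as
        # infinity is an assumption of this sketch.
        weights_memory: float = float('inf')
        activation_memory: float = float('inf')
        total_memory: float = float('inf')
        bops: float = float('inf')

        def is_satisfied_by(self, other: 'ResourceUtilization') -> bool:
            # Satisfied only if *every* metric of `other` fits the target,
            # not just BOPs as in the old assertion.
            return (other.weights_memory <= self.weights_memory and
                    other.activation_memory <= self.activation_memory and
                    other.total_memory <= self.total_memory and
                    other.bops <= self.bops)

    target = ResourceUtilization(activation_memory=1800, bops=3000000)
    final = ResourceUtilization(weights_memory=120, activation_memory=1750,
                                total_memory=1870, bops=2900000)
    assert target.is_satisfied_by(final)  # all metrics within the target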
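
The new activation_memory bounds trace back to the max-cut arithmetic in the second hunk's comment ("act max cut 2bit = 2*(30*30*4)*2/8 = 1800"). Spelled out, assuming two 30x30x4 activation tensors are alive at the widest cut and both are quantized to 2 bits:

    # Max-cut activation memory from the patch comment, in bytes.
    tensors_in_cut = 2             # two activation tensors alive at the cut (assumption)
    elements_per_tensor = 30 * 30 * 4
    bits_per_element = 2           # 2-bit activation quantization
    max_cut_bytes = tensors_in_cut * elements_per_tensor * bits_per_element // 8
    assert max_cut_bytes == 1800   # hence activation_memory=1800 in the tests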