fix and enable torch bops tests
irenaby committed Mar 3, 2025
1 parent 4a1f731 commit fd444d5
Showing 2 changed files with 9 additions and 9 deletions.
@@ -131,7 +131,7 @@ def compare(self, quantized_model, float_model, input_x=None, quantization_info=
        # Verify that some layers got bit-width smaller than 8 bits (so checking candidate index is not 0)
        self.unit_test.assertTrue(any(i > 0 for i in quantization_info.mixed_precision_cfg))
        # Verify final BOPs utilization
-        self.unit_test.assertTrue(quantization_info.final_resource_utilization.bops <= self.get_resource_utilization().bops)
+        self.unit_test.assertTrue(self.get_resource_utilization().is_satisfied_by(quantization_info.final_resource_utilization))


class MixedPrecisionBopsBasicTest(BaseMixedPrecisionBopsTest):
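
The change above replaces a BOPs-only comparison with ResourceUtilization.is_satisfied_by, so the test now checks every metric the target constrains, not just BOPs. A minimal sketch of the semantics the assertion appears to rely on, illustrative only (the class name RUSketch is hypothetical; the real ResourceUtilization lives in model_compression_toolkit and may differ):

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class RUSketch:
        # None means the metric is unconstrained in the target.
        weights_memory: Optional[float] = None
        activation_memory: Optional[float] = None
        total_memory: Optional[float] = None
        bops: Optional[float] = None

        def is_satisfied_by(self, measured: "RUSketch") -> bool:
            # Every constrained metric must not be exceeded by the measured utilization.
            pairs = [(self.weights_memory, measured.weights_memory),
                     (self.activation_memory, measured.activation_memory),
                     (self.total_memory, measured.total_memory),
                     (self.bops, measured.bops)]
            return all(target is None or value <= target for target, value in pairs)
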
@@ -198,23 +198,24 @@ def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_resource_utilization(self):
-        return ResourceUtilization(activation_memory=1000, bops=3000000) # should require some quantization to all layers
+        # act max cut 2bit = 2*(30*30*4)*2/8 = 1800
+        return ResourceUtilization(activation_memory=1800, bops=3000000) # should require some quantization to all layers


class MixedPrecisionBopsAndTotalMemoryUtilizationTest(MixedPrecisionBopsAllWeightsLayersTest):
    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_resource_utilization(self):
-        return ResourceUtilization(total_memory=1100, bops=3000000) # should require some quantization to all layers
+        return ResourceUtilization(total_memory=2000, bops=3000000) # should require some quantization to all layers


class MixedPrecisionBopsWeightsActivationUtilizationTest(MixedPrecisionBopsAllWeightsLayersTest):
    def __init__(self, unit_test):
        super().__init__(unit_test)

    def get_resource_utilization(self):
-        return ResourceUtilization(weights_memory=150, activation_memory=1000, bops=3000000) # should require some quantization to all layers
+        return ResourceUtilization(weights_memory=150, activation_memory=1800, bops=3000000) # should require some quantization to all layers


class MixedPrecisionBopsMultipleOutEdgesTest(BaseMixedPrecisionBopsTest):
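
For reference, the in-diff comment "act max cut 2bit = 2*(30*30*4)*2/8 = 1800" spells out the new activation budget. Assuming it means two 4x30x30 activation tensors alive at the max cut, quantized to 2 bits each (a reading of the comment, not verified against the test network), the arithmetic is:

    elements_per_tensor = 30 * 30 * 4   # activation tensor size at the max cut
    tensors_at_cut = 2                  # tensors alive simultaneously
    bits_per_element = 2                # lowest activation bit-width candidate
    budget_bytes = tensors_at_cut * elements_per_tensor * bits_per_element / 8
    assert budget_bytes == 1800         # matches activation_memory=1800 above
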
9 changes: 4 additions & 5 deletions tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -624,11 +624,10 @@ def test_mixed_precision_bops_utilization(self):
        MixedPrecisionBopsAllWeightsLayersTest(self).run_test()
        MixedPrecisionWeightsOnlyBopsTest(self).run_test()
        MixedPrecisionActivationOnlyBopsTest(self).run_test()
-        # TODO: uncomment these tests when the issue of combined BOPs and other RU metrics is solved.
-        # MixedPrecisionBopsAndWeightsMemoryUtilizationTest(self).run_test()
-        # MixedPrecisionBopsAndActivationMemoryUtilizationTest(self).run_test()
-        # MixedPrecisionBopsAndTotalMemoryUtilizationTest(self).run_test()
-        # MixedPrecisionBopsWeightsActivationUtilizationTest(self).run_test()
+        MixedPrecisionBopsAndWeightsMemoryUtilizationTest(self).run_test()
+        MixedPrecisionBopsAndActivationMemoryUtilizationTest(self).run_test()
+        MixedPrecisionBopsAndTotalMemoryUtilizationTest(self).run_test()
+        MixedPrecisionBopsWeightsActivationUtilizationTest(self).run_test()
        MixedPrecisionBopsMultipleOutEdgesTest(self).run_test()

    def test_mixed_precision_distance_functions(self):
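
For orientation, the re-enabled tests all pass a combined ResourceUtilization target (BOPs plus a memory metric) into MCT's mixed-precision flow. A rough usage sketch follows, with a toy model and data generator standing in for the test fixtures, and argument names taken from memory of the public MCT API (they may differ between versions):

    import torch
    import model_compression_toolkit as mct

    float_model = torch.nn.Sequential(torch.nn.Conv2d(4, 4, 3), torch.nn.ReLU())  # toy stand-in

    def representative_data_gen():
        # Yields calibration batches as lists of input tensors.
        yield [torch.randn(1, 4, 32, 32)]

    # Combined target: activation memory plus BOPs, as in the tests above.
    target_ru = mct.core.ResourceUtilization(activation_memory=1800, bops=3000000)
    core_config = mct.core.CoreConfig(
        mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig())

    quantized_model, quant_info = mct.ptq.pytorch_post_training_quantization(
        float_model,
        representative_data_gen,
        target_resource_utilization=target_ru,
        core_config=core_config)

    # The tests then assert target_ru.is_satisfied_by(quant_info.final_resource_utilization).
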
