Skip to content

Commit

Permalink
Fix test cases with TFMA 0.47.0, TFDV 1.16.1 (#7620)
Browse files Browse the repository at this point in the history
* Fix test cases with TFMA 0.47.0

* Add xfail to deprecated model validator test case which is not working with TFMA 0.47.0

* Comment out experimental sklearn_predict_extractor_test.py which is not compatible with TFMA 0.47.0
  • Loading branch information
nikelite authored Nov 21, 2024
1 parent 2d94da5 commit a758387
Show file tree
Hide file tree
Showing 5 changed files with 180 additions and 184 deletions.
4 changes: 2 additions & 2 deletions tfx/components/distribution_validator/executor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,7 @@ def testSplitPairs(self, split_pairs, expected_split_pair_names,
step: 'company'
}
validations {
sql_expression: 'feature_test.string_stats.unique > feature_base.string_stats.unique'
sql_expression: 'feature_test.string_stats.unique > feature_base.string_stats.unique * 2'
severity: ERROR
description: 'Test feature has too few unique values.'
}
Expand All @@ -308,7 +308,7 @@ def testSplitPairs(self, split_pairs, expected_split_pair_names,
reason {
type: CUSTOM_VALIDATION
short_description: "Test feature has too few unique values."
description: "Custom validation triggered anomaly. Query: feature_test.string_stats.unique > feature_base.string_stats.unique Test dataset: default slice Base dataset: Base path: company" }
description: "Custom validation triggered anomaly. Query: feature_test.string_stats.unique > feature_base.string_stats.unique * 2 Test dataset: default slice Base dataset: Base path: company" }
path {
step: "company"
}
Expand Down
15 changes: 2 additions & 13 deletions tfx/components/evaluator/executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import tensorflow_model_analysis as tfma
# Need to import the following module so that the fairness indicator post-export
# metric is registered.
import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # pylint: disable=unused-import
from tfx import types
from tfx.components.evaluator import constants
from tfx.components.util import udf_utils
Expand Down Expand Up @@ -102,16 +101,6 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]],

self._log_startup(input_dict, output_dict, exec_properties)

# Add fairness indicator metric callback if necessary.
fairness_indicator_thresholds = json_utils.loads(
exec_properties.get(
standard_component_specs.FAIRNESS_INDICATOR_THRESHOLDS_KEY, 'null'))
add_metrics_callbacks = None
if fairness_indicator_thresholds:
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators( # pytype: disable=module-attr
thresholds=fairness_indicator_thresholds),
]

output_uri = artifact_utils.get_single_uri(
output_dict[constants.EVALUATION_KEY])
Expand Down Expand Up @@ -196,7 +185,7 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]],
eval_saved_model_path=model_path,
model_name=model_spec.name,
eval_config=eval_config,
add_metrics_callbacks=add_metrics_callbacks))
add_metrics_callbacks=None))
else:
eval_config = None
assert (standard_component_specs.FEATURE_SLICING_SPEC_KEY
Expand All @@ -219,7 +208,7 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]],
eval_saved_model_path=model_path,
model_name='',
eval_config=None,
add_metrics_callbacks=add_metrics_callbacks))
add_metrics_callbacks=None))

eval_shared_model = models[0] if len(models) == 1 else models
schema = None
Expand Down
5 changes: 4 additions & 1 deletion tfx/components/evaluator/executor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@

import glob
import os
import pytest

from absl import logging
from absl.testing import parameterized
Expand Down Expand Up @@ -147,6 +148,7 @@ def testEvalution(self, exec_properties, model_agnostic=False):
column_for_slicing=['trip_start_day', 'trip_miles']),
])),
}))
@pytest.mark.xfail(run=False, reason="EvalSavedModel is deprecated.")
def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
Expand Down Expand Up @@ -180,7 +182,8 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
# post-export metric is registered. This may raise an ImportError if the
# currently-installed version of TFMA does not support fairness
# indicators.
import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # noqa: F401
# Note: tensorflow_model_analysis.addons is deprecated from 0.47.0.
# import tensorflow_model_analysis.addons.fairness.post_export_metrics.fairness_indicators # noqa: F401
exec_properties[
standard_component_specs
.FAIRNESS_INDICATOR_THRESHOLDS_KEY] = '[0.1, 0.3, 0.5, 0.7, 0.9]'
Expand Down
3 changes: 3 additions & 0 deletions tfx/components/model_validator/executor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
"""Tests for tfx.components.model_validator.executor."""

import os
import pytest
import tensorflow as tf

from tfx.components.model_validator import constants
Expand All @@ -23,6 +24,8 @@
from tfx.types import standard_artifacts


@pytest.mark.xfail(run=False,
reason="Model validator is deprecated and this doesn't work with TFMA 0.47.0")
class ExecutorTest(tf.test.TestCase):

def setUp(self):
Expand Down
Loading

0 comments on commit a758387

Please sign in to comment.