From 6cb97454408a206e408e0d6cba141f92b06c52a3 Mon Sep 17 00:00:00 2001 From: Pritam Dodeja Date: Tue, 24 Feb 2026 07:47:13 -0500 Subject: [PATCH] Add Python 3.12 and 3.13 support and migrate TFMA config namespaces Description: Updated pyproject.toml to support Python versions up to 3.13 (previously capped below 3.11). Migrated TFX components, benchmarks, and examples to use explicit TFMA protobuf namespaces. Fixed AttributeError issues caused by the removal of configuration aliases in the top-level tensorflow_model_analysis SDK. Technical Details: In TFMA 1.18.0.dev and later, configuration symbols like EvalConfig, SlicingSpec, and ModelSpec have been moved out of the main SDK namespace. This commit updates all references to these symbols to use the tfma.proto.config_pb2 or explicit config_pb2 paths. Key changes include: Dependency Update: Bumped requires-python in pyproject.toml to >=3.9, <3.14. Namespace Migration: Replaced tfma. with config_pb2. or tfma.proto.config_pb2. across: tfx/components/evaluator (core logic and tests) tfx/benchmarks tfx/examples (Chicago Taxi, Penguin, BERT, Ranking, etc.) tfx/types/standard_component_specs.py Imports: Added from tensorflow_model_analysis.proto import config_pb2 to files utilizing the new configuration paths. Affected configuration symbols: EvalConfig, SlicingSpec, MetricConfig, MetricsSpec, ModelSpec, MetricThreshold, GenericValueThreshold, GenericChangeThreshold, and MetricDirection. 
Testing summary: (tfx-312-1) ***@***:1$ export TF_USE_LEGACY_KERAS=1 && pytest -v tfx/tfx/components 40 failed, 429 passed, 67 skipped, 9 xfailed, 26 warnings in 880.92s (0:14:40) ================================================================== --- pyproject.toml | 2 +- tfx/benchmarks/tfma_v2_benchmark_base.py | 23 +++--- tfx/components/evaluator/component.py | 5 +- tfx/components/evaluator/component_test.py | 5 +- tfx/components/evaluator/executor.py | 5 +- tfx/components/evaluator/executor_test.py | 81 ++++++++++--------- .../testdata/module_file/evaluator_module.py | 5 +- .../compiler/testdata/composable_pipeline.py | 17 ++-- .../testdata/composable_pipeline_async.py | 17 ++-- .../compiler/testdata/conditional_pipeline.py | 17 ++-- .../compiler/testdata/iris_pipeline_async.py | 17 ++-- .../compiler/testdata/iris_pipeline_sync.py | 17 ++-- .../taxi/setup/dags/taxi_pipeline.py | 21 ++--- tfx/examples/bert/cola/bert_cola_pipeline.py | 19 ++--- tfx/examples/bert/mrpc/bert_mrpc_pipeline.py | 19 ++--- .../taxi_pipeline_native_keras.py | 19 ++--- .../taxi_pipeline_simple.py | 19 ++--- tfx/examples/imdb/imdb_fetch_data.py | 2 +- .../imdb/imdb_pipeline_native_keras.py | 19 ++--- .../mnist/mnist_pipeline_native_keras.py | 15 ++-- .../penguin_pipeline_sklearn_gcp.py | 19 ++--- .../experimental/sklearn_predict_extractor.py | 3 +- .../sklearn_predict_extractor_test.py | 9 ++- .../penguin/penguin_pipeline_kubeflow.py | 19 ++--- .../penguin/penguin_pipeline_local.py | 19 ++--- .../penguin_pipeline_local_infraval.py | 19 ++--- tfx/examples/ranking/ranking_pipeline.py | 17 ++-- .../templates/penguin/pipeline/pipeline.py | 19 ++--- .../templates/taxi/pipeline/pipeline.py | 19 ++--- tfx/orchestration/kubeflow/test_utils.py | 21 ++--- tfx/orchestration/kubeflow/v2/test_utils.py | 19 ++--- tfx/types/standard_component_specs.py | 4 +- 32 files changed, 280 insertions(+), 251 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 10a6c6121d..d781cb9313 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -31,7 +31,7 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules" ] keywords = ["tensorflow", "tfx"] -requires-python = ">=3.9,<3.11" +requires-python = ">=3.9,<3.14" [project.urls] Homepage = "https://www.tensorflow.org/tfx" Repository = "https://github.com/tensorflow/tfx" diff --git a/tfx/benchmarks/tfma_v2_benchmark_base.py b/tfx/benchmarks/tfma_v2_benchmark_base.py index 478e82ba9e..593eb32e80 100644 --- a/tfx/benchmarks/tfma_v2_benchmark_base.py +++ b/tfx/benchmarks/tfma_v2_benchmark_base.py @@ -21,6 +21,7 @@ import numpy as np import tensorflow as tf import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator from tensorflow_model_analysis.evaluators import poisson_bootstrap from tensorflow_model_analysis.extractors import example_weights_extractor @@ -73,16 +74,16 @@ def _init_model(self, multi_model, validation): if validation: # Only one metric, adding a threshold for all slices. 
metric_specs[0].metrics[0].threshold.CopyFrom( - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.5}, upper_bound={"value": 0.5}), - change_threshold=tfma.GenericChangeThreshold( + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( absolute={"value": -0.001}, - direction=tfma.MetricDirection.HIGHER_IS_BETTER))) - self._eval_config = tfma.EvalConfig( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER))) + self._eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec(name="candidate", label_key="tips"), - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec(name="candidate", label_key="tips"), + tfma.proto.config_pb2.ModelSpec( name="baseline", label_key="tips", is_baseline=True) ], metrics_specs=metric_specs) @@ -104,11 +105,11 @@ def _init_model(self, multi_model, validation): if validation: # Only one metric, adding a threshold for all slices. 
metric_specs[0].metrics[0].threshold.CopyFrom( - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.5}, upper_bound={"value": 0.5}))) - self._eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(label_key="tips")], + self._eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(label_key="tips")], metrics_specs=metric_specs) self._eval_shared_models = { "": diff --git a/tfx/components/evaluator/component.py b/tfx/components/evaluator/component.py index e8ccfbe7d1..9af4c46730 100644 --- a/tfx/components/evaluator/component.py +++ b/tfx/components/evaluator/component.py @@ -17,6 +17,7 @@ from absl import logging import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import types from tfx.components.evaluator import executor from tfx.components.util import udf_utils @@ -57,7 +58,7 @@ def __init__( fairness_indicator_thresholds: Optional[Union[ List[float], data_types.RuntimeParameter]] = None, example_splits: Optional[List[str]] = None, - eval_config: Optional[tfma.EvalConfig] = None, + eval_config: Optional[config_pb2.EvalConfig] = None, schema: Optional[types.BaseChannel] = None, module_file: Optional[str] = None, module_path: Optional[str] = None): @@ -82,7 +83,7 @@ def __init__( example_splits: Names of splits on which the metrics are computed. Default behavior (when example_splits is set to None or Empty) is using the 'eval' split. - eval_config: Instance of tfma.EvalConfig containg configuration settings + eval_config: Instance of config_pb2.EvalConfig containg configuration settings for running the evaluation. This config has options for both estimator and Keras. schema: A `Schema` channel to use for TFXIO. 
diff --git a/tfx/components/evaluator/component_test.py b/tfx/components/evaluator/component_test.py index a160e79c80..1d85fd7a5f 100644 --- a/tfx/components/evaluator/component_test.py +++ b/tfx/components/evaluator/component_test.py @@ -15,6 +15,7 @@ import tensorflow as tf import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components.evaluator import component from tfx.orchestration import data_types @@ -102,8 +103,8 @@ def testConstructWithEvalConfig(self): evaluator = component.Evaluator( examples=channel_utils.as_channel([examples]), model=channel_utils.as_channel([model_exports]), - eval_config=tfma.EvalConfig( - slicing_specs=[tfma.SlicingSpec(feature_keys=['trip_start_hour'])]), + eval_config=config_pb2.EvalConfig( + slicing_specs=[tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour'])]), schema=channel_utils.as_channel([schema]),) self.assertEqual(standard_artifacts.ModelEvaluation.TYPE_NAME, evaluator.outputs['evaluation'].type_name) diff --git a/tfx/components/evaluator/executor.py b/tfx/components/evaluator/executor.py index 938a031671..e5189c0a78 100644 --- a/tfx/components/evaluator/executor.py +++ b/tfx/components/evaluator/executor.py @@ -19,6 +19,7 @@ from absl import logging import apache_beam as beam import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 # Need to import the following module so that the fairness indicator post-export # metric is registered. from tfx import types @@ -74,7 +75,7 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]], output_dict: Output dict from output key to a list of Artifacts. - evaluation: model evaluation results. exec_properties: A dict of execution properties. - - eval_config: JSON string of tfma.EvalConfig. + - eval_config: JSON string of config_pb2.EvalConfig. - feature_slicing_spec: JSON string of evaluator_pb2.FeatureSlicingSpec instance, providing the way to slice the data. 
Deprecated, use eval_config.slicing_specs instead. @@ -126,7 +127,7 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]], slice_spec = None has_baseline = bool( input_dict.get(standard_component_specs.BASELINE_MODEL_KEY)) - eval_config = tfma.EvalConfig() + eval_config = config_pb2.EvalConfig() proto_utils.json_to_proto( exec_properties[standard_component_specs.EVAL_CONFIG_KEY], eval_config) diff --git a/tfx/components/evaluator/executor_test.py b/tfx/components/evaluator/executor_test.py index 93bdf201e7..1f36252fd3 100644 --- a/tfx/components/evaluator/executor_test.py +++ b/tfx/components/evaluator/executor_test.py @@ -22,6 +22,7 @@ from absl.testing import parameterized import tensorflow as tf import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components.evaluator import executor from tfx.components.testdata.module_file import evaluator_module from tfx.dsl.io import fileio @@ -40,18 +41,18 @@ class ExecutorTest(tf.test.TestCase, parameterized.TestCase): ('evaluation_w_eval_config', { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig(slicing_specs=[ - tfma.SlicingSpec(feature_keys=['trip_start_hour']), - tfma.SlicingSpec( + config_pb2.EvalConfig(slicing_specs=[ + tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']), + tfma.proto.config_pb2.SlicingSpec( feature_keys=['trip_start_day', 'trip_miles']), ])) }), ('evaluation_w_module_file', { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig(slicing_specs=[ - tfma.SlicingSpec(feature_keys=['trip_start_hour']), - tfma.SlicingSpec( + config_pb2.EvalConfig(slicing_specs=[ + tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']), + tfma.proto.config_pb2.SlicingSpec( feature_keys=['trip_start_day', 'trip_miles']), ])), standard_component_specs.MODULE_FILE_KEY: @@ -60,9 +61,9 @@ class ExecutorTest(tf.test.TestCase, parameterized.TestCase): 
('evaluation_w_module_path', { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig(slicing_specs=[ - tfma.SlicingSpec(feature_keys=['trip_start_hour']), - tfma.SlicingSpec( + config_pb2.EvalConfig(slicing_specs=[ + tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']), + tfma.proto.config_pb2.SlicingSpec( feature_keys=['trip_start_day', 'trip_miles']), ])), standard_component_specs.MODULE_PATH_KEY: @@ -71,14 +72,14 @@ class ExecutorTest(tf.test.TestCase, parameterized.TestCase): ('model_agnostic_evaluation', { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig( + config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( label_key='tips', prediction_key='tips'), ], slicing_specs=[ - tfma.SlicingSpec(feature_keys=['trip_start_hour']), - tfma.SlicingSpec( + tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']), + tfma.proto.config_pb2.SlicingSpec( feature_keys=['trip_start_day', 'trip_miles']), ])) }, True), @@ -214,22 +215,22 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties): { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig( + config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec(label_key='tips'), + tfma.proto.config_pb2.ModelSpec(label_key='tips'), ], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='ExampleCount', # Count > 0, OK. 
- threshold=tfma.MetricThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( value_threshold=tfma - .GenericValueThreshold( + .proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0}))), ]), ], - slicing_specs=[tfma.SlicingSpec()])) + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()])) }, True, True), @@ -238,27 +239,27 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties): { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig( + config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( name='baseline1', label_key='tips', is_baseline=True), - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( name='candidate1', label_key='tips'), ], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='ExampleCount', # Count < -1, NOT OK. - threshold=tfma.MetricThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( value_threshold=tfma - .GenericValueThreshold( + .proto.config_pb2.GenericValueThreshold( upper_bound={'value': -1}))), ]), ], - slicing_specs=[tfma.SlicingSpec()])) + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()])) }, False, True), @@ -267,36 +268,36 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties): { standard_component_specs.EVAL_CONFIG_KEY: proto_utils.proto_to_json( - tfma.EvalConfig( + config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( name='baseline', label_key='tips', is_baseline=True), - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( name='candidate', label_key='tips'), ], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='ExampleCount', # Count > 0, OK. 
- threshold=tfma.MetricThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( value_threshold=tfma - .GenericValueThreshold( + .proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0}))), - tfma.MetricConfig( + tfma.proto.config_pb2.MetricConfig( class_name='Accuracy', # Should be ignored due to no baseline. - threshold=tfma.MetricThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( change_threshold=tfma - .GenericChangeThreshold( + .proto.config_pb2.GenericChangeThreshold( relative={'value': 0}, - direction=tfma.MetricDirection + direction=tfma.proto.config_pb2.MetricDirection .LOWER_IS_BETTER))), ]), ], - slicing_specs=[tfma.SlicingSpec()])) + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()])) }, True, False)) diff --git a/tfx/components/testdata/module_file/evaluator_module.py b/tfx/components/testdata/module_file/evaluator_module.py index 29b36403d5..ecc69e68b6 100644 --- a/tfx/components/testdata/module_file/evaluator_module.py +++ b/tfx/components/testdata/module_file/evaluator_module.py @@ -16,6 +16,7 @@ from typing import Any, Dict, List import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx_bsl.tfxio import tensor_adapter @@ -35,7 +36,7 @@ def custom_eval_shared_model(eval_saved_model_path: str, model_name: str, - eval_config: tfma.EvalConfig, + eval_config: config_pb2.EvalConfig, **kwargs: Dict[str, Any]) -> _EvalSharedModel: return tfma.default_eval_shared_model( eval_saved_model_path=eval_saved_model_path, @@ -46,7 +47,7 @@ def custom_eval_shared_model(eval_saved_model_path: str, model_name: str, def custom_extractors( eval_shared_model: _MaybeMultipleEvalSharedModels, - eval_config: tfma.EvalConfig, + eval_config: config_pb2.EvalConfig, tensor_adapter_config: tensor_adapter.TensorAdapterConfig, ) -> List[tfma.extractors.Extractor]: return tfma.default_extractors( diff --git a/tfx/dsl/compiler/testdata/composable_pipeline.py b/tfx/dsl/compiler/testdata/composable_pipeline.py 
index 3d4104ae9c..be2a7ffe9d 100644 --- a/tfx/dsl/compiler/testdata/composable_pipeline.py +++ b/tfx/dsl/compiler/testdata/composable_pipeline.py @@ -15,6 +15,7 @@ import os import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import InfraValidator @@ -145,18 +146,18 @@ def create_test_pipeline(): trigger_strategy=pipeline_pb2.NodeExecutionOptions.TriggerStrategy.LAZILY_ALL_UPSTREAM_NODES_SUCCEEDED, ) - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name="eval")], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name="eval")], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( thresholds={ "sparse_categorical_accuracy": - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.6}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-10})) }) ]) diff --git a/tfx/dsl/compiler/testdata/composable_pipeline_async.py b/tfx/dsl/compiler/testdata/composable_pipeline_async.py index 03af36b1e8..34b5489b17 100644 --- a/tfx/dsl/compiler/testdata/composable_pipeline_async.py +++ b/tfx/dsl/compiler/testdata/composable_pipeline_async.py @@ -15,6 +15,7 @@ import os import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import InfraValidator @@ -150,18 +151,18 @@ def 
create_test_pipeline(): data_ingestion.outputs["schema"].no_trigger(), ) - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name="eval")], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name="eval")], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( thresholds={ "sparse_categorical_accuracy": - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.6}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-10})) }) ]) diff --git a/tfx/dsl/compiler/testdata/conditional_pipeline.py b/tfx/dsl/compiler/testdata/conditional_pipeline.py index ea859a2e2d..a1468c9c66 100644 --- a/tfx/dsl/compiler/testdata/conditional_pipeline.py +++ b/tfx/dsl/compiler/testdata/conditional_pipeline.py @@ -15,6 +15,7 @@ import os import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import InfraValidator @@ -53,18 +54,18 @@ def create_test_pipeline(): train_args=trainer_pb2.TrainArgs(num_steps=2000), eval_args=trainer_pb2.EvalArgs(num_steps=5)) - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name="eval")], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name="eval")], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( thresholds={ 
"sparse_categorical_accuracy": - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.6}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-10})) }) ]) diff --git a/tfx/dsl/compiler/testdata/iris_pipeline_async.py b/tfx/dsl/compiler/testdata/iris_pipeline_async.py index 127895e2c1..602db672d1 100644 --- a/tfx/dsl/compiler/testdata/iris_pipeline_async.py +++ b/tfx/dsl/compiler/testdata/iris_pipeline_async.py @@ -15,6 +15,7 @@ import os import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -89,18 +90,18 @@ def create_test_pipeline(): # model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id( # "latest_blessed_model_resolver") - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name="eval")], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name="eval")], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( thresholds={ "sparse_categorical_accuracy": - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.6}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + 
direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-10})) }) ]) diff --git a/tfx/dsl/compiler/testdata/iris_pipeline_sync.py b/tfx/dsl/compiler/testdata/iris_pipeline_sync.py index 67f8f76b91..af33fabeb1 100644 --- a/tfx/dsl/compiler/testdata/iris_pipeline_sync.py +++ b/tfx/dsl/compiler/testdata/iris_pipeline_sync.py @@ -15,6 +15,7 @@ import os import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -90,18 +91,18 @@ def create_test_pipeline(): model_blessing=Channel(type=standard_artifacts.ModelBlessing)).with_id( "latest_blessed_model_resolver") - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name="eval")], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name="eval")], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( thresholds={ "sparse_categorical_accuracy": - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={"value": 0.6}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={"value": -1e-10})) }) ]) diff --git a/tfx/examples/airflow_workshop/taxi/setup/dags/taxi_pipeline.py b/tfx/examples/airflow_workshop/taxi/setup/dags/taxi_pipeline.py index 0c6f81bfe2..876953ddba 100644 --- a/tfx/examples/airflow_workshop/taxi/setup/dags/taxi_pipeline.py +++ b/tfx/examples/airflow_workshop/taxi/setup/dags/taxi_pipeline.py @@ -21,6 +21,7 @@ from typing import List import 
tensorflow_model_analysis as tfma # Step 5 +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator # Step 6 from tfx.components import ExampleValidator # Step 3 @@ -130,17 +131,17 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( # Step 6 + eval_config = config_pb2.EvalConfig( # Step 6 model_specs=[ # Step 6 # This assumes a serving model with signature 'serving_default'. - tfma.ModelSpec( # Step 6 + tfma.proto.config_pb2.ModelSpec( # Step 6 signature_name='serving_default', # Step 6 label_key='tips', # Step 6 preprocessing_function_names=['transform_features'], # Step 6 ) # Step 6 ], # Step 6 metrics_specs=[ # Step 6 - tfma.MetricsSpec( # Step 6 + tfma.proto.config_pb2.MetricsSpec( # Step 6 # The metrics added here are in addition to those saved with the # model (assuming either a keras model or EvalSavedModel is used). # Any metrics added into the saved model (for example using @@ -149,17 +150,17 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # To add validation thresholds for metrics saved with the model, # add them keyed by metric name to the thresholds map. 
metrics=[ # Step 6 - tfma.MetricConfig(class_name='ExampleCount'), # Step 6 - tfma.MetricConfig( + tfma.proto.config_pb2.MetricConfig(class_name='ExampleCount'), # Step 6 + tfma.proto.config_pb2.MetricConfig( class_name='BinaryAccuracy', # Step 6 - threshold=tfma.MetricThreshold( # Step 6 - value_threshold=tfma.GenericValueThreshold( # Step 6 + threshold=tfma.proto.config_pb2.MetricThreshold( # Step 6 + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( # Step 6 lower_bound={'value': 0.5}), # Step 6 # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma .GenericChangeThreshold( # Step 6 - direction=tfma.MetricDirection + direction=tfma.proto.config_pb2.MetricDirection .HIGHER_IS_BETTER, # Step 6 absolute={'value': -1e-10}))) # Step 6 ] # Step 6 @@ -167,10 +168,10 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, ], # Step 6 slicing_specs=[ # Step 6 # An empty slice spec means the overall slice, i.e. the whole dataset. - tfma.SlicingSpec(), # Step 6 + tfma.proto.config_pb2.SlicingSpec(), # Step 6 # Data can be sliced along a feature column. In this case, data is # sliced along feature column trip_start_hour. 
- tfma.SlicingSpec( # Step 6 + tfma.proto.config_pb2.SlicingSpec( # Step 6 feature_keys=['trip_start_hour']) # Step 6 ]) # Step 6 diff --git a/tfx/examples/bert/cola/bert_cola_pipeline.py b/tfx/examples/bert/cola/bert_cola_pipeline.py index 98f2007c60..eeb5e5c123 100644 --- a/tfx/examples/bert/cola/bert_cola_pipeline.py +++ b/tfx/examples/bert/cola/bert_cola_pipeline.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -117,23 +118,23 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(label_key='label')], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(label_key='label')], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( # Adjust the threshold when training on the # full dataset. # TODO(b/236089934): Change back to 0.5. lower_bound={'value': 0.1}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-2}))) ]) ]) diff --git a/tfx/examples/bert/mrpc/bert_mrpc_pipeline.py b/tfx/examples/bert/mrpc/bert_mrpc_pipeline.py index 8a64e480e0..b8625110d0 100644 --- a/tfx/examples/bert/mrpc/bert_mrpc_pipeline.py +++ b/tfx/examples/bert/mrpc/bert_mrpc_pipeline.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -117,23 +118,23 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(label_key='label')], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(label_key='label')], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( # Adjust the threshold when training on the # full dataset. # TODO(b/236089934): Change back to 0.5. lower_bound={'value': 0.1}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-2}))) ]) ]) diff --git a/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_native_keras.py b/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_native_keras.py index a0d509c036..5453afa288 100644 --- a/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_native_keras.py +++ b/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_native_keras.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -114,24 +115,24 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key='tips_xf', preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='BinaryAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.6}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py b/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py index 5e5faf18ef..28cdc05bb4 100644 --- a/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py +++ b/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py @@ -18,6 +18,7 @@ from typing import List import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -129,23 +130,23 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name='eval')], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name='eval')], slicing_specs=[ - tfma.SlicingSpec(), - tfma.SlicingSpec(feature_keys=['trip_start_hour']) + tfma.proto.config_pb2.SlicingSpec(), + tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']) ], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( thresholds={ 'accuracy': - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.6}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10})) }) ]) diff --git a/tfx/examples/imdb/imdb_fetch_data.py b/tfx/examples/imdb/imdb_fetch_data.py index c7cf9e564a..a01cabb3de 100644 --- a/tfx/examples/imdb/imdb_fetch_data.py +++ b/tfx/examples/imdb/imdb_fetch_data.py @@ -23,7 +23,7 @@ # Change the hyperparameters to better suit the bigger dataset. # The configurations that were found reasonable are listed below: # imdb_pipeline_native_keras.py: -# tfma.GenericValueThreshold(lower_bound={'value':0.85} +# tfma.proto.config_pb2.GenericValueThreshold(lower_bound={'value':0.85} # trainer_pb2.TrainArgs(num_steps=7000) # trainer_pb2.EvalArgs(num_steps=800) # imdb_utils_native_keras.py: diff --git a/tfx/examples/imdb/imdb_pipeline_native_keras.py b/tfx/examples/imdb/imdb_pipeline_native_keras.py index 9817a38af5..5b89617f0e 100644 --- a/tfx/examples/imdb/imdb_pipeline_native_keras.py +++ b/tfx/examples/imdb/imdb_pipeline_native_keras.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -119,22 +120,22 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). 
- eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(label_key='label')], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(label_key='label')], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='BinaryAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( # Increase this threshold when training on complete # dataset. lower_bound={'value': 0.01}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-2}))) ]) ]) diff --git a/tfx/examples/mnist/mnist_pipeline_native_keras.py b/tfx/examples/mnist/mnist_pipeline_native_keras.py index d584cab3b6..167f820200 100644 --- a/tfx/examples/mnist/mnist_pipeline_native_keras.py +++ b/tfx/examples/mnist/mnist_pipeline_native_keras.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import ImportExampleGen @@ -108,15 +109,15 @@ def _create_trainer(module_file, component_id): # Uses TFMA to compute evaluation statistics over features of a model and # performs quality validation of a candidate model. 
- eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(label_key='image_class')], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[config_pb2.ModelSpec(label_key='image_class')], + slicing_specs=[config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + config_pb2.MetricsSpec(metrics=[ + config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=config_pb2.MetricThreshold( + value_threshold=config_pb2.GenericValueThreshold( lower_bound={'value': accuracy_threshold}))) ]) ]) diff --git a/tfx/examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py b/tfx/examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py index 7d81337d18..c3f59fe653 100644 --- a/tfx/examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py +++ b/tfx/examples/penguin/experimental/penguin_pipeline_sklearn_gcp.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx # Identifier for the pipeline. This will also be used as the model name on AI @@ -163,18 +164,18 @@ def _create_pipeline( # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline).
- eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(label_key='species')], - slicing_specs=[tfma.SlicingSpec()], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(label_key='species')], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='Accuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.6}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/examples/penguin/experimental/sklearn_predict_extractor.py b/tfx/examples/penguin/experimental/sklearn_predict_extractor.py index f7f3d39536..217f1ed9a1 100644 --- a/tfx/examples/penguin/experimental/sklearn_predict_extractor.py +++ b/tfx/examples/penguin/experimental/sklearn_predict_extractor.py @@ -21,6 +21,7 @@ import apache_beam as beam import tensorflow as tf import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx_bsl.tfxio import tensor_adapter _PREDICT_EXTRACTOR_STAGE_NAME = 'SklearnPredict' @@ -160,7 +161,7 @@ def custom_eval_shared_model( def custom_extractors( eval_shared_model: tfma.MaybeMultipleEvalSharedModels, - eval_config: tfma.EvalConfig, + eval_config: config_pb2.EvalConfig, tensor_adapter_config: tensor_adapter.TensorAdapterConfig, ) -> List[tfma.extractors.Extractor]: """Returns default extractors plus a custom prediction extractor.""" diff --git a/tfx/examples/penguin/experimental/sklearn_predict_extractor_test.py 
b/tfx/examples/penguin/experimental/sklearn_predict_extractor_test.py index 8f0200c471..4778e6a468 100644 --- a/tfx/examples/penguin/experimental/sklearn_predict_extractor_test.py +++ b/tfx/examples/penguin/experimental/sklearn_predict_extractor_test.py @@ -23,6 +23,7 @@ #from apache_beam.testing import util #from sklearn import neural_network as nn #import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 #from tfx.examples.penguin.experimental import sklearn_predict_extractor #from tfx_bsl.tfxio import tensor_adapter #from tfx_bsl.tfxio import test_util @@ -36,7 +37,7 @@ # super().setUp() # self._eval_export_dir = os.path.join(self._getTempDir(), 'eval_export') # self._create_sklearn_model(self._eval_export_dir) -# self._eval_config = tfma.EvalConfig(model_specs=[tfma.ModelSpec()]) +# self._eval_config = config_pb2.EvalConfig(model_specs=[tfma.proto.config_pb2.ModelSpec()]) # self._eval_shared_model = ( # sklearn_predict_extractor.custom_eval_shared_model( # eval_saved_model_path=self._eval_export_dir, @@ -103,9 +104,9 @@ #"and the test fails.", strict=True) # def testMakeSklearnPredictExtractorWithMultiModels(self): # """Tests that predictions are made from extracts for multiple models.""" -# eval_config = tfma.EvalConfig(model_specs=[ -# tfma.ModelSpec(name='model1'), -# tfma.ModelSpec(name='model2'), +# eval_config = config_pb2.EvalConfig(model_specs=[ +# tfma.proto.config_pb2.ModelSpec(name='model1'), +# tfma.proto.config_pb2.ModelSpec(name='model2'), # ]) # eval_export_dir_1 = os.path.join(self._eval_export_dir, '1') # self._create_sklearn_model(eval_export_dir_1) diff --git a/tfx/examples/penguin/penguin_pipeline_kubeflow.py b/tfx/examples/penguin/penguin_pipeline_kubeflow.py index 5a59b294bf..ea2e07181f 100644 --- a/tfx/examples/penguin/penguin_pipeline_kubeflow.py +++ b/tfx/examples/penguin/penguin_pipeline_kubeflow.py @@ -20,6 +20,7 @@ from absl import flags from absl import logging import tensorflow_model_analysis 
as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx # TODO(b/197359030): test a persistent volume (PV) mounted scenario. @@ -404,25 +405,25 @@ def create_pipeline( # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key='species_xf', preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.3}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/examples/penguin/penguin_pipeline_local.py b/tfx/examples/penguin/penguin_pipeline_local.py index 940665a9eb..f7e9e692ac 100644 --- a/tfx/examples/penguin/penguin_pipeline_local.py +++ b/tfx/examples/penguin/penguin_pipeline_local.py @@ -23,6 +23,7 @@ import absl from absl import flags import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx from tfx.utils import proto_utils @@ -292,25 +293,25 @@ def create_pipeline( # pylint: disable=invalid-name # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key='species_xf', preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': accuracy_threshold}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/examples/penguin/penguin_pipeline_local_infraval.py b/tfx/examples/penguin/penguin_pipeline_local_infraval.py index c4deb00c8a..f44a658b9b 100644 --- a/tfx/examples/penguin/penguin_pipeline_local_infraval.py +++ b/tfx/examples/penguin/penguin_pipeline_local_infraval.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx _pipeline_name = 'penguin_local_infraval' @@ -112,25 +113,25 @@ def _create_pipeline( # Uses TFMA to compute evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key='species_xf', preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': accuracy_threshold}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). 
- change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/examples/ranking/ranking_pipeline.py b/tfx/examples/ranking/ranking_pipeline.py index c7b9bdbfbf..b7daa6f2c8 100644 --- a/tfx/examples/ranking/ranking_pipeline.py +++ b/tfx/examples/ranking/ranking_pipeline.py @@ -18,6 +18,7 @@ import absl import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import Evaluator from tfx.components import ImportExampleGen from tfx.components import Pusher @@ -110,28 +111,28 @@ def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, schema=schema_gen.outputs['schema'], eval_args=trainer_pb2.EvalArgs(num_steps=10)) - eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='', label_key='relevance', - padding_options=tfma.PaddingOptions( + padding_options=tfma.proto.config_pb2.PaddingOptions( label_float_padding=-1.0, prediction_float_padding=-1.0)) ], slicing_specs=[ - tfma.SlicingSpec(), - tfma.SlicingSpec(feature_keys=['query_tokens']), + tfma.proto.config_pb2.SlicingSpec(), + tfma.proto.config_pb2.SlicingSpec(feature_keys=['query_tokens']), ], metrics_specs=[ - tfma.MetricsSpec( + tfma.proto.config_pb2.MetricsSpec( per_slice_thresholds={ 'metric/ndcg_10': - tfma.PerSliceMetricThresholds(thresholds=[ - tfma.PerSliceMetricThreshold( + tfma.proto.config_pb2.PerSliceMetricThresholds(thresholds=[ + tfma.proto.config_pb2.PerSliceMetricThreshold( # The overall slice.
- slicing_specs=[tfma.SlicingSpec()], - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.6}))) ]) }) diff --git a/tfx/experimental/templates/penguin/pipeline/pipeline.py b/tfx/experimental/templates/penguin/pipeline/pipeline.py index 027f9c7142..e5f94239d0 100644 --- a/tfx/experimental/templates/penguin/pipeline/pipeline.py +++ b/tfx/experimental/templates/penguin/pipeline/pipeline.py @@ -19,6 +19,7 @@ from typing import List, Optional import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx from tfx.experimental.templates.penguin.models import features @@ -103,25 +104,25 @@ def create_pipeline( # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). - eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key=features.LABEL_KEY, # Use transformed label key if Transform is used. 
# label_key=features.transformed_name(features.LABEL_KEY), preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='SparseCategoricalAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': eval_accuracy_threshold}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/experimental/templates/taxi/pipeline/pipeline.py b/tfx/experimental/templates/taxi/pipeline/pipeline.py index 0da29ae8ea..65b526cab1 100644 --- a/tfx/experimental/templates/taxi/pipeline/pipeline.py +++ b/tfx/experimental/templates/taxi/pipeline/pipeline.py @@ -19,6 +19,7 @@ from typing import Any, Dict, List, Optional import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx from ml_metadata.proto import metadata_store_pb2 @@ -121,23 +122,23 @@ def create_pipeline( # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). 
- eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key='tips_xf', preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='BinaryAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': eval_accuracy_threshold}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/orchestration/kubeflow/test_utils.py b/tfx/orchestration/kubeflow/test_utils.py index 71e81f24f3..a87cbb932f 100644 --- a/tfx/orchestration/kubeflow/test_utils.py +++ b/tfx/orchestration/kubeflow/test_utils.py @@ -23,6 +23,7 @@ import kfp from kfp_server_api import rest import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator @@ -274,24 +275,24 @@ def create_e2e_components( module_file=trainer_module, ) # Set the TFMA config for Model Evaluation and Validation. 
- eval_config = tfma.EvalConfig( - model_specs=[tfma.ModelSpec(signature_name='eval')], + eval_config = config_pb2.EvalConfig( + model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name='eval')], metrics_specs=[ - tfma.MetricsSpec( - metrics=[tfma.MetricConfig(class_name='ExampleCount')], + tfma.proto.config_pb2.MetricsSpec( + metrics=[tfma.proto.config_pb2.MetricConfig(class_name='ExampleCount')], thresholds={ 'accuracy': - tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.5}), - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10})) }) ], slicing_specs=[ - tfma.SlicingSpec(), - tfma.SlicingSpec(feature_keys=['trip_start_hour']) + tfma.proto.config_pb2.SlicingSpec(), + tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']) ]) evaluator = Evaluator( examples=example_gen.outputs['examples'], diff --git a/tfx/orchestration/kubeflow/v2/test_utils.py b/tfx/orchestration/kubeflow/v2/test_utils.py index 98cc73105f..eaf0c52605 100644 --- a/tfx/orchestration/kubeflow/v2/test_utils.py +++ b/tfx/orchestration/kubeflow/v2/test_utils.py @@ -19,6 +19,7 @@ from kfp.pipeline_spec import pipeline_spec_pb2 as pipeline_pb2 import tensorflow_model_analysis as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx import v1 as tfx from tfx.components.example_gen import utils from tfx.dsl.component.experimental import executor_specs @@ -236,24 +237,24 @@ def create_pipeline_components( 'Resolver.latest_blessed_model_resolver') # Uses TFMA to compute a evaluation statistics over features of a model and # perform quality validation of a candidate model (compared to a baseline). 
- eval_config = tfma.EvalConfig( + eval_config = config_pb2.EvalConfig( model_specs=[ - tfma.ModelSpec( + tfma.proto.config_pb2.ModelSpec( signature_name='serving_default', label_key='tips_xf', preprocessing_function_names=['transform_features']) ], - slicing_specs=[tfma.SlicingSpec()], + slicing_specs=[tfma.proto.config_pb2.SlicingSpec()], metrics_specs=[ - tfma.MetricsSpec(metrics=[ - tfma.MetricConfig( + tfma.proto.config_pb2.MetricsSpec(metrics=[ + tfma.proto.config_pb2.MetricConfig( class_name='BinaryAccuracy', - threshold=tfma.MetricThreshold( - value_threshold=tfma.GenericValueThreshold( + threshold=tfma.proto.config_pb2.MetricThreshold( + value_threshold=tfma.proto.config_pb2.GenericValueThreshold( lower_bound={'value': 0.6}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). - change_threshold=tfma.GenericChangeThreshold( - direction=tfma.MetricDirection.HIGHER_IS_BETTER, + change_threshold=tfma.proto.config_pb2.GenericChangeThreshold( + direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ]) ]) diff --git a/tfx/types/standard_component_specs.py b/tfx/types/standard_component_specs.py index a2d2456458..4929c9ff5b 100644 --- a/tfx/types/standard_component_specs.py +++ b/tfx/types/standard_component_specs.py @@ -14,7 +14,7 @@ """Component specifications for the standard set of TFX Components.""" from tensorflow_data_validation.anomalies.proto import custom_validation_config_pb2 -from tensorflow_model_analysis import sdk as tfma +from tensorflow_model_analysis.proto import config_pb2 from tfx.proto import bulk_inferrer_pb2 from tfx.proto import distribution_validator_pb2 from tfx.proto import evaluator_pb2 @@ -168,7 +168,7 @@ class EvaluatorSpec(ComponentSpec): """Evaluator component spec.""" PARAMETERS = { - EVAL_CONFIG_KEY: ExecutionParameter(type=tfma.EvalConfig, optional=True), + EVAL_CONFIG_KEY: ExecutionParameter(type=config_pb2.EvalConfig, optional=True), # 
TODO(b/181911822): Deprecated, use eval_config.slicing_specs. FEATURE_SLICING_SPEC_KEY: ExecutionParameter( type=evaluator_pb2.FeatureSlicingSpec, optional=True