Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules"
]
keywords = ["tensorflow", "tfx"]
requires-python = ">=3.9,<3.11"
requires-python = ">=3.9,<3.14"
[project.urls]
Homepage = "https://www.tensorflow.org/tfx"
Repository = "https://github.com/tensorflow/tfx"
Expand Down
23 changes: 12 additions & 11 deletions tfx/benchmarks/tfma_v2_benchmark_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import numpy as np
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator
from tensorflow_model_analysis.evaluators import poisson_bootstrap
from tensorflow_model_analysis.extractors import example_weights_extractor
Expand Down Expand Up @@ -73,16 +74,16 @@ def _init_model(self, multi_model, validation):
if validation:
# Only one metric, adding a threshold for all slices.
metric_specs[0].metrics[0].threshold.CopyFrom(
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
tfma.proto.config_pb2.MetricThreshold(
value_threshold=tfma.proto.config_pb2.GenericValueThreshold(
lower_bound={"value": 0.5}, upper_bound={"value": 0.5}),
change_threshold=tfma.GenericChangeThreshold(
change_threshold=tfma.proto.config_pb2.GenericChangeThreshold(
absolute={"value": -0.001},
direction=tfma.MetricDirection.HIGHER_IS_BETTER)))
self._eval_config = tfma.EvalConfig(
direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER)))
self._eval_config = config_pb2.EvalConfig(
model_specs=[
tfma.ModelSpec(name="candidate", label_key="tips"),
tfma.ModelSpec(
tfma.proto.config_pb2.ModelSpec(name="candidate", label_key="tips"),
tfma.proto.config_pb2.ModelSpec(
name="baseline", label_key="tips", is_baseline=True)
],
metrics_specs=metric_specs)
Expand All @@ -104,11 +105,11 @@ def _init_model(self, multi_model, validation):
if validation:
# Only one metric, adding a threshold for all slices.
metric_specs[0].metrics[0].threshold.CopyFrom(
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
tfma.proto.config_pb2.MetricThreshold(
value_threshold=tfma.proto.config_pb2.GenericValueThreshold(
lower_bound={"value": 0.5}, upper_bound={"value": 0.5})))
self._eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(label_key="tips")],
self._eval_config = config_pb2.EvalConfig(
model_specs=[tfma.proto.config_pb2.ModelSpec(label_key="tips")],
metrics_specs=metric_specs)
self._eval_shared_models = {
"":
Expand Down
5 changes: 3 additions & 2 deletions tfx/components/evaluator/component.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@

from absl import logging
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2
from tfx import types
from tfx.components.evaluator import executor
from tfx.components.util import udf_utils
Expand Down Expand Up @@ -57,7 +58,7 @@ def __init__(
fairness_indicator_thresholds: Optional[Union[
List[float], data_types.RuntimeParameter]] = None,
example_splits: Optional[List[str]] = None,
eval_config: Optional[tfma.EvalConfig] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
schema: Optional[types.BaseChannel] = None,
module_file: Optional[str] = None,
module_path: Optional[str] = None):
Expand All @@ -82,7 +83,7 @@ def __init__(
example_splits: Names of splits on which the metrics are computed.
Default behavior (when example_splits is set to None or Empty) is using
the 'eval' split.
eval_config: Instance of tfma.EvalConfig containing configuration settings
eval_config: Instance of config_pb2.EvalConfig containing configuration settings
for running the evaluation. This config has options for both estimator
and Keras.
schema: A `Schema` channel to use for TFXIO.
Expand Down
5 changes: 3 additions & 2 deletions tfx/components/evaluator/component_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2

from tfx.components.evaluator import component
from tfx.orchestration import data_types
Expand Down Expand Up @@ -102,8 +103,8 @@ def testConstructWithEvalConfig(self):
evaluator = component.Evaluator(
examples=channel_utils.as_channel([examples]),
model=channel_utils.as_channel([model_exports]),
eval_config=tfma.EvalConfig(
slicing_specs=[tfma.SlicingSpec(feature_keys=['trip_start_hour'])]),
eval_config=config_pb2.EvalConfig(
slicing_specs=[tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour'])]),
schema=channel_utils.as_channel([schema]),)
self.assertEqual(standard_artifacts.ModelEvaluation.TYPE_NAME,
evaluator.outputs['evaluation'].type_name)
Expand Down
5 changes: 3 additions & 2 deletions tfx/components/evaluator/executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from absl import logging
import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2
# Need to import the following module so that the fairness indicator post-export
# metric is registered.
from tfx import types
Expand Down Expand Up @@ -74,7 +75,7 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Output dict from output key to a list of Artifacts.
- evaluation: model evaluation results.
exec_properties: A dict of execution properties.
- eval_config: JSON string of tfma.EvalConfig.
- eval_config: JSON string of config_pb2.EvalConfig.
- feature_slicing_spec: JSON string of evaluator_pb2.FeatureSlicingSpec
instance, providing the way to slice the data. Deprecated, use
eval_config.slicing_specs instead.
Expand Down Expand Up @@ -126,7 +127,7 @@ def Do(self, input_dict: Dict[str, List[types.Artifact]],
slice_spec = None
has_baseline = bool(
input_dict.get(standard_component_specs.BASELINE_MODEL_KEY))
eval_config = tfma.EvalConfig()
eval_config = config_pb2.EvalConfig()
proto_utils.json_to_proto(
exec_properties[standard_component_specs.EVAL_CONFIG_KEY],
eval_config)
Expand Down
81 changes: 41 additions & 40 deletions tfx/components/evaluator/executor_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2
from tfx.components.evaluator import executor
from tfx.components.testdata.module_file import evaluator_module
from tfx.dsl.io import fileio
Expand All @@ -40,18 +41,18 @@ class ExecutorTest(tf.test.TestCase, parameterized.TestCase):
('evaluation_w_eval_config', {
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(slicing_specs=[
tfma.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.SlicingSpec(
config_pb2.EvalConfig(slicing_specs=[
tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.proto.config_pb2.SlicingSpec(
feature_keys=['trip_start_day', 'trip_miles']),
]))
}),
('evaluation_w_module_file', {
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(slicing_specs=[
tfma.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.SlicingSpec(
config_pb2.EvalConfig(slicing_specs=[
tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.proto.config_pb2.SlicingSpec(
feature_keys=['trip_start_day', 'trip_miles']),
])),
standard_component_specs.MODULE_FILE_KEY:
Expand All @@ -60,9 +61,9 @@ class ExecutorTest(tf.test.TestCase, parameterized.TestCase):
('evaluation_w_module_path', {
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(slicing_specs=[
tfma.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.SlicingSpec(
config_pb2.EvalConfig(slicing_specs=[
tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.proto.config_pb2.SlicingSpec(
feature_keys=['trip_start_day', 'trip_miles']),
])),
standard_component_specs.MODULE_PATH_KEY:
Expand All @@ -71,14 +72,14 @@ class ExecutorTest(tf.test.TestCase, parameterized.TestCase):
('model_agnostic_evaluation', {
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(
config_pb2.EvalConfig(
model_specs=[
tfma.ModelSpec(
tfma.proto.config_pb2.ModelSpec(
label_key='tips', prediction_key='tips'),
],
slicing_specs=[
tfma.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.SlicingSpec(
tfma.proto.config_pb2.SlicingSpec(feature_keys=['trip_start_hour']),
tfma.proto.config_pb2.SlicingSpec(
feature_keys=['trip_start_day', 'trip_miles']),
]))
}, True),
Expand Down Expand Up @@ -214,22 +215,22 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
{
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(
config_pb2.EvalConfig(
model_specs=[
tfma.ModelSpec(label_key='tips'),
tfma.proto.config_pb2.ModelSpec(label_key='tips'),
],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
tfma.proto.config_pb2.MetricsSpec(metrics=[
tfma.proto.config_pb2.MetricConfig(
class_name='ExampleCount',
# Count > 0, OK.
threshold=tfma.MetricThreshold(
threshold=tfma.proto.config_pb2.MetricThreshold(
value_threshold=tfma
.GenericValueThreshold(
.proto.config_pb2.GenericValueThreshold(
lower_bound={'value': 0}))),
]),
],
slicing_specs=[tfma.SlicingSpec()]))
slicing_specs=[tfma.proto.config_pb2.SlicingSpec()]))
},
True,
True),
Expand All @@ -238,27 +239,27 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
{
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(
config_pb2.EvalConfig(
model_specs=[
tfma.ModelSpec(
tfma.proto.config_pb2.ModelSpec(
name='baseline1',
label_key='tips',
is_baseline=True),
tfma.ModelSpec(
tfma.proto.config_pb2.ModelSpec(
name='candidate1', label_key='tips'),
],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
tfma.proto.config_pb2.MetricsSpec(metrics=[
tfma.proto.config_pb2.MetricConfig(
class_name='ExampleCount',
# Count < -1, NOT OK.
threshold=tfma.MetricThreshold(
threshold=tfma.proto.config_pb2.MetricThreshold(
value_threshold=tfma
.GenericValueThreshold(
.proto.config_pb2.GenericValueThreshold(
upper_bound={'value': -1}))),
]),
],
slicing_specs=[tfma.SlicingSpec()]))
slicing_specs=[tfma.proto.config_pb2.SlicingSpec()]))
},
False,
True),
Expand All @@ -267,36 +268,36 @@ def testDoLegacySingleEvalSavedModelWFairness(self, exec_properties):
{
standard_component_specs.EVAL_CONFIG_KEY:
proto_utils.proto_to_json(
tfma.EvalConfig(
config_pb2.EvalConfig(
model_specs=[
tfma.ModelSpec(
tfma.proto.config_pb2.ModelSpec(
name='baseline',
label_key='tips',
is_baseline=True),
tfma.ModelSpec(
tfma.proto.config_pb2.ModelSpec(
name='candidate', label_key='tips'),
],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
tfma.proto.config_pb2.MetricsSpec(metrics=[
tfma.proto.config_pb2.MetricConfig(
class_name='ExampleCount',
# Count > 0, OK.
threshold=tfma.MetricThreshold(
threshold=tfma.proto.config_pb2.MetricThreshold(
value_threshold=tfma
.GenericValueThreshold(
.proto.config_pb2.GenericValueThreshold(
lower_bound={'value': 0}))),
tfma.MetricConfig(
tfma.proto.config_pb2.MetricConfig(
class_name='Accuracy',
# Should be ignored due to no baseline.
threshold=tfma.MetricThreshold(
threshold=tfma.proto.config_pb2.MetricThreshold(
change_threshold=tfma
.GenericChangeThreshold(
.proto.config_pb2.GenericChangeThreshold(
relative={'value': 0},
direction=tfma.MetricDirection
direction=tfma.proto.config_pb2.MetricDirection
.LOWER_IS_BETTER))),
]),
],
slicing_specs=[tfma.SlicingSpec()]))
slicing_specs=[tfma.proto.config_pb2.SlicingSpec()]))
},
True,
False))
Expand Down
5 changes: 3 additions & 2 deletions tfx/components/testdata/module_file/evaluator_module.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from typing import Any, Dict, List

import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2
from tfx_bsl.tfxio import tensor_adapter


Expand All @@ -35,7 +36,7 @@


def custom_eval_shared_model(eval_saved_model_path: str, model_name: str,
eval_config: tfma.EvalConfig,
eval_config: config_pb2.EvalConfig,
**kwargs: Dict[str, Any]) -> _EvalSharedModel:
return tfma.default_eval_shared_model(
eval_saved_model_path=eval_saved_model_path,
Expand All @@ -46,7 +47,7 @@ def custom_eval_shared_model(eval_saved_model_path: str, model_name: str,

def custom_extractors(
eval_shared_model: _MaybeMultipleEvalSharedModels,
eval_config: tfma.EvalConfig,
eval_config: config_pb2.EvalConfig,
tensor_adapter_config: tensor_adapter.TensorAdapterConfig,
) -> List[tfma.extractors.Extractor]:
return tfma.default_extractors(
Expand Down
17 changes: 9 additions & 8 deletions tfx/dsl/compiler/testdata/composable_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
import os

import tensorflow_model_analysis as tfma
from tensorflow_model_analysis.proto import config_pb2
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import InfraValidator
Expand Down Expand Up @@ -145,18 +146,18 @@ def create_test_pipeline():
trigger_strategy=pipeline_pb2.NodeExecutionOptions.TriggerStrategy.LAZILY_ALL_UPSTREAM_NODES_SUCCEEDED,
)

eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name="eval")],
slicing_specs=[tfma.SlicingSpec()],
eval_config = config_pb2.EvalConfig(
model_specs=[tfma.proto.config_pb2.ModelSpec(signature_name="eval")],
slicing_specs=[tfma.proto.config_pb2.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(
tfma.proto.config_pb2.MetricsSpec(
thresholds={
"sparse_categorical_accuracy":
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
tfma.proto.config_pb2.MetricThreshold(
value_threshold=tfma.proto.config_pb2.GenericValueThreshold(
lower_bound={"value": 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
change_threshold=tfma.proto.config_pb2.GenericChangeThreshold(
direction=tfma.proto.config_pb2.MetricDirection.HIGHER_IS_BETTER,
absolute={"value": -1e-10}))
})
])
Expand Down
Loading
Loading