From dccd743fbd8c622a2c1589f1cd3e4187ed6fb4f5 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 29 Oct 2025 14:10:45 +0000 Subject: [PATCH 1/5] Initial plan From 1be07682b008a4df9ac2f9ddf724079b7d9c730f Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 29 Oct 2025 14:24:30 +0000 Subject: [PATCH 2/5] Add tensorflow_serving_protos directory with proto files and update BUILD dependencies Co-authored-by: atobiszei <36039266+atobiszei@users.noreply.github.com> --- BUILD.bazel | 6 +- WORKSPACE | 7 + src/BUILD | 5 +- third_party/tensorflow_serving_protos/BUILD | 189 ++++ .../tensorflow/core/example/example.proto | 301 ++++++ .../example_parser_configuration.proto | 40 + .../tensorflow/core/example/feature.proto | 110 ++ .../framework/allocation_description.proto | 29 + .../tensorflow/core/framework/api_def.proto | 138 +++ .../core/framework/attr_value.proto | 64 ++ .../core/framework/cost_graph.proto | 89 ++ .../core/framework/cpp_shape_inference.proto | 36 + .../tensorflow/core/framework/dataset.proto | 47 + .../core/framework/dataset_metadata.proto | 10 + .../core/framework/dataset_options.proto | 262 +++++ .../core/framework/device_attributes.proto | 58 ++ .../tensorflow/core/framework/full_type.proto | 310 ++++++ .../tensorflow/core/framework/function.proto | 136 +++ .../tensorflow/core/framework/graph.proto | 60 ++ .../core/framework/graph_debug_info.proto | 61 ++ .../core/framework/graph_transfer_info.proto | 71 ++ .../core/framework/kernel_def.proto | 48 + .../core/framework/log_memory.proto | 95 ++ .../tensorflow/core/framework/model.proto | 141 +++ .../tensorflow/core/framework/node_def.proto | 95 ++ .../tensorflow/core/framework/op_def.proto | 193 ++++ .../framework/optimized_function_graph.proto | 48 + .../core/framework/reader_base.proto | 18 + .../core/framework/resource_handle.proto | 47 + .../core/framework/step_stats.proto | 88 ++ .../tensorflow/core/framework/summary.proto | 133 +++ .../tensorflow/core/framework/tensor.proto | 101 ++ .../core/framework/tensor_description.proto | 24 + .../core/framework/tensor_shape.proto | 46 + .../core/framework/tensor_slice.proto | 39 + .../tensorflow/core/framework/types.proto | 100 ++ .../tensorflow/core/framework/variable.proto | 84 ++ .../tensorflow/core/framework/versions.proto | 33 + .../tensorflow/core/protobuf/autotuning.proto | 7 + .../core/protobuf/bfc_memory_map.proto | 7 + .../tensorflow/core/protobuf/cluster.proto | 87 ++ .../protobuf/composite_tensor_variant.proto | 16 + .../tensorflow/core/protobuf/config.proto | 985 ++++++++++++++++++ .../core/protobuf/control_flow.proto | 91 ++ .../core/protobuf/conv_autotuning.proto | 54 + .../protobuf/core_platform_payloads.proto | 25 + .../core/protobuf/critical_section.proto | 24 + .../core/protobuf/data_service.proto | 92 ++ .../tensorflow/core/protobuf/debug.proto | 94 ++ .../core/protobuf/debug_event.proto | 300 ++++++ .../core/protobuf/device_filters.proto | 73 ++ .../core/protobuf/device_properties.proto | 58 ++ .../core/protobuf/eager_service.proto | 370 +++++++ .../core/protobuf/error_codes.proto | 11 + .../core/protobuf/fingerprint.proto | 31 + .../tensorflow/core/protobuf/master.proto | 353 +++++++ .../core/protobuf/master_service.proto | 121 +++ .../tensorflow/core/protobuf/meta_graph.proto | 286 +++++ .../core/protobuf/named_tensor.proto | 25 + .../core/protobuf/queue_runner.proto | 30 + .../core/protobuf/remote_tensor_handle.proto | 34 + 
.../tensorflow/core/protobuf/replay_log.proto | 47 + .../core/protobuf/rewriter_config.proto | 241 +++++ .../core/protobuf/rpc_options.proto | 7 + .../core/protobuf/saved_model.proto | 23 + .../core/protobuf/saved_object_graph.proto | 251 +++++ .../tensorflow/core/protobuf/saver.proto | 48 + .../core/protobuf/service_config.proto | 111 ++ .../tensorflow/core/protobuf/snapshot.proto | 58 ++ .../tensorflow/core/protobuf/status.proto | 11 + .../tensorflow/core/protobuf/struct.proto | 164 +++ .../core/protobuf/tensor_bundle.proto | 66 ++ .../core/protobuf/tensorflow_server.proto | 64 ++ .../protobuf/trackable_object_graph.proto | 80 ++ .../core/protobuf/transport_options.proto | 10 + .../core/protobuf/verifier_config.proto | 27 + .../tensorflow/core/protobuf/worker.proto | 610 +++++++++++ .../core/protobuf/worker_service.proto | 126 +++ .../apis/classification.proto | 48 + .../apis/get_model_metadata.proto | 30 + .../apis/get_model_status.proto | 68 ++ .../tensorflow_serving/apis/inference.proto | 59 ++ .../tensorflow_serving/apis/input.proto | 82 ++ .../tensorflow_serving/apis/logging.proto | 20 + .../tensorflow_serving/apis/model.proto | 36 + .../apis/model_management.proto | 25 + .../apis/model_service.proto | 24 + .../tensorflow_serving/apis/predict.proto | 183 ++++ .../apis/prediction_log.proto | 55 + .../apis/prediction_service.proto | 31 + .../tensorflow_serving/apis/regression.proto | 37 + .../apis/session_service.proto | 56 + .../tensorflow_serving/apis/status.proto | 17 + .../file_system_storage_path_source.proto | 83 ++ .../config/log_collector_config.proto | 12 + .../config/logging_config.proto | 29 + .../config/model_server_config.proto | 85 ++ .../config/monitoring_config.proto | 19 + .../config/platform_config.proto | 19 + .../config/ssl_config.proto | 16 + 100 files changed, 9509 insertions(+), 5 deletions(-) create mode 100644 third_party/tensorflow_serving_protos/BUILD create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/example/example.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/example/example_parser_configuration.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/example/feature.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/allocation_description.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/api_def.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/attr_value.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/cost_graph.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/cpp_shape_inference.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_metadata.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_options.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/device_attributes.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/full_type.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/function.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/graph.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/graph_debug_info.proto 
create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/graph_transfer_info.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/kernel_def.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/log_memory.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/model.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/node_def.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/op_def.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/optimized_function_graph.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/reader_base.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/resource_handle.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/step_stats.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/summary.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/tensor.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/tensor_description.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/tensor_shape.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/tensor_slice.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/types.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/variable.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/framework/versions.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/autotuning.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/bfc_memory_map.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/cluster.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/composite_tensor_variant.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/control_flow.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/conv_autotuning.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/core_platform_payloads.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/critical_section.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/data_service.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/debug.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/debug_event.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/device_filters.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/device_properties.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/eager_service.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/error_codes.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/fingerprint.proto create 
mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/master.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/master_service.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/meta_graph.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/named_tensor.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/queue_runner.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/remote_tensor_handle.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/replay_log.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/rewriter_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/rpc_options.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/saved_model.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/saved_object_graph.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/saver.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/service_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/snapshot.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/status.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/struct.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/tensor_bundle.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/tensorflow_server.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/trackable_object_graph.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/transport_options.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/verifier_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/worker.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow/core/protobuf/worker_service.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/classification.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/get_model_metadata.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/get_model_status.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/inference.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/input.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/logging.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/model.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/model_management.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/model_service.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/predict.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/prediction_log.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/prediction_service.proto create mode 100644 
third_party/tensorflow_serving_protos/tensorflow_serving/apis/regression.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/session_service.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/apis/status.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/file_system_storage_path_source.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/log_collector_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/logging_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/model_server_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/monitoring_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/platform_config.proto create mode 100644 third_party/tensorflow_serving_protos/tensorflow_serving/config/ssl_config.proto diff --git a/BUILD.bazel b/BUILD.bazel index aebd22244f..40ce84efa2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -33,8 +33,10 @@ create_config_settings() cc_library( name = "ovms_dependencies", deps = [ - "@tensorflow_serving//tensorflow_serving/apis:prediction_service_cc_proto", - "@tensorflow_serving//tensorflow_serving/apis:model_service_cc_proto", + "@tensorflow_serving_protos//:prediction_service_cc_proto", + "@tensorflow_serving_protos//:prediction_service_cc_grpc", + "@tensorflow_serving_protos//:model_service_cc_proto", + "@tensorflow_serving_protos//:model_service_cc_grpc", "@minitrace//:trace", "@com_github_grpc_grpc//:grpc++", "@com_github_tencent_rapidjson//:rapidjson", diff --git a/WORKSPACE b/WORKSPACE index 4f5e23a9ee..3bb7bd14ee 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -158,6 +158,13 @@ cc_library( ) +# TensorFlow Serving API protos only (no build dependencies) +new_local_repository( + name = "tensorflow_serving_protos", + build_file = "@//third_party/tensorflow_serving_protos:BUILD", + path = "third_party/tensorflow_serving_protos", +) + # Used for gRPC API protos only # Tensorflow serving git_repository( diff --git a/src/BUILD b/src/BUILD index bf87fd34cd..d3443e30b9 100644 --- a/src/BUILD +++ b/src/BUILD @@ -1070,8 +1070,7 @@ ovms_cc_library( srcs = [ "tfs_frontend/tfs_utils.cpp",], deps = [ - "@tensorflow_serving//tensorflow_serving/apis:prediction_service_cc_proto", - "@tensorflow_serving//tensorflow_serving/apis:model_service_cc_proto", + "@tensorflow_serving_protos//:tensorflow_serving_apis_cc_proto", "@org_tensorflow//tensorflow/core:framework", # Eigen Tensor "libovmslogging", "libovmsprofiler", @@ -1100,7 +1099,7 @@ ovms_cc_library( # TODO split dependencies deps = [ "@com_github_jupp0r_prometheus_cpp//core", "@mediapipe//mediapipe/framework:calculator_framework", - "@tensorflow_serving//tensorflow_serving/apis:prediction_service_cc_proto", + "@tensorflow_serving_protos//:tensorflow_serving_apis_cc_proto", "@tensorflow_serving//tensorflow_serving/util:threadpool_executor", "@tensorflow_serving//tensorflow_serving/util:json_tensor", "libovms_module", diff --git a/third_party/tensorflow_serving_protos/BUILD b/third_party/tensorflow_serving_protos/BUILD new file mode 100644 index 0000000000..f150d25bd5 --- /dev/null +++ b/third_party/tensorflow_serving_protos/BUILD @@ -0,0 +1,189 @@ +# +# Copyright (c) 2025 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# TensorFlow Serving API protobuf definitions +# This package contains only the proto files needed for TensorFlow Serving API compatibility +# without requiring the full TensorFlow or TensorFlow Serving build dependencies. + +load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library") +load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +# TensorFlow Core Framework protos +proto_library( + name = "tensorflow_core_framework_protos", + srcs = glob(["tensorflow/core/framework/*.proto"]), + deps = ["@com_google_protobuf//:cc_wkt_protos"], +) + +cc_proto_library( + name = "tensorflow_core_framework_cc_proto", + srcs = glob(["tensorflow/core/framework/*.proto"]), + deps = ["@com_google_protobuf//:cc_wkt_protos"], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +# TensorFlow Core Example protos +proto_library( + name = "tensorflow_core_example_protos", + srcs = glob(["tensorflow/core/example/*.proto"]), +) + +cc_proto_library( + name = "tensorflow_core_example_cc_proto", + srcs = glob(["tensorflow/core/example/*.proto"]), + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +# TensorFlow Core Protobuf protos +proto_library( + name = "tensorflow_core_protobuf_protos", + srcs = glob(["tensorflow/core/protobuf/*.proto"]), + deps = [ + ":tensorflow_core_framework_protos", + "@com_google_protobuf//:cc_wkt_protos", + ], +) + +cc_proto_library( + name = "tensorflow_core_protobuf_cc_proto", + srcs = glob(["tensorflow/core/protobuf/*.proto"]), + deps = [ + ":tensorflow_core_framework_cc_proto", + "@com_google_protobuf//:cc_wkt_protos", + ], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +# TensorFlow Serving Config protos +proto_library( + name = "tensorflow_serving_config_protos", + srcs = glob(["tensorflow_serving/config/*.proto"]), + deps = ["@com_google_protobuf//:cc_wkt_protos"], +) + +cc_proto_library( + name = "tensorflow_serving_config_cc_proto", + srcs = glob(["tensorflow_serving/config/*.proto"]), + deps = ["@com_google_protobuf//:cc_wkt_protos"], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +# TensorFlow Serving API protos (non-service) +proto_library( + name = "tensorflow_serving_apis_protos", + srcs = [ + "tensorflow_serving/apis/classification.proto", + "tensorflow_serving/apis/get_model_metadata.proto", + "tensorflow_serving/apis/get_model_status.proto", + "tensorflow_serving/apis/inference.proto", + "tensorflow_serving/apis/input.proto", + "tensorflow_serving/apis/logging.proto", + "tensorflow_serving/apis/model.proto", + "tensorflow_serving/apis/model_management.proto", + "tensorflow_serving/apis/predict.proto", + "tensorflow_serving/apis/prediction_log.proto", + "tensorflow_serving/apis/regression.proto", + "tensorflow_serving/apis/session_service.proto", + "tensorflow_serving/apis/status.proto", + ], + deps = [ + 
":tensorflow_core_example_protos", + ":tensorflow_core_framework_protos", + ":tensorflow_core_protobuf_protos", + ":tensorflow_serving_config_protos", + "@com_google_protobuf//:cc_wkt_protos", + ], +) + +cc_proto_library( + name = "tensorflow_serving_apis_cc_proto", + srcs = [ + "tensorflow_serving/apis/classification.proto", + "tensorflow_serving/apis/get_model_metadata.proto", + "tensorflow_serving/apis/get_model_status.proto", + "tensorflow_serving/apis/inference.proto", + "tensorflow_serving/apis/input.proto", + "tensorflow_serving/apis/logging.proto", + "tensorflow_serving/apis/model.proto", + "tensorflow_serving/apis/model_management.proto", + "tensorflow_serving/apis/predict.proto", + "tensorflow_serving/apis/prediction_log.proto", + "tensorflow_serving/apis/regression.proto", + "tensorflow_serving/apis/session_service.proto", + "tensorflow_serving/apis/status.proto", + ], + deps = [ + ":tensorflow_core_example_cc_proto", + ":tensorflow_core_framework_cc_proto", + ":tensorflow_core_protobuf_cc_proto", + ":tensorflow_serving_config_cc_proto", + "@com_google_protobuf//:cc_wkt_protos", + ], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +# Prediction Service proto (with gRPC service) +proto_library( + name = "prediction_service_proto", + srcs = ["tensorflow_serving/apis/prediction_service.proto"], + deps = [":tensorflow_serving_apis_protos"], +) + +cc_proto_library( + name = "prediction_service_cc_proto", + srcs = ["tensorflow_serving/apis/prediction_service.proto"], + deps = [":tensorflow_serving_apis_cc_proto"], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +cc_grpc_library( + name = "prediction_service_cc_grpc", + srcs = [":prediction_service_proto"], + grpc_only = True, + deps = [":prediction_service_cc_proto"], +) + +# Model Service proto (with gRPC service) +proto_library( + name = "model_service_proto", + srcs = ["tensorflow_serving/apis/model_service.proto"], + deps = [":tensorflow_serving_apis_protos"], +) + +cc_proto_library( + name = "model_service_cc_proto", + srcs = ["tensorflow_serving/apis/model_service.proto"], + deps = [":tensorflow_serving_apis_cc_proto"], + default_runtime = "@com_google_protobuf//:protobuf", + protoc = "@com_google_protobuf//:protoc", +) + +cc_grpc_library( + name = "model_service_cc_grpc", + srcs = [":model_service_proto"], + grpc_only = True, + deps = [":model_service_cc_proto"], +) diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/example/example.proto b/third_party/tensorflow_serving_protos/tensorflow/core/example/example.proto new file mode 100644 index 0000000000..9f762fb511 --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/example/example.proto @@ -0,0 +1,301 @@ +// Protocol messages for describing input data Examples for machine learning +// model training or inference. +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/example/feature.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "ExampleProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.example"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto"; + +// An Example is a mostly-normalized data format for storing data for +// training and inference. It contains a key-value store (features); where +// each key (string) maps to a Feature message (which is oneof packed BytesList, +// FloatList, or Int64List). 
This flexible and compact format allows the +// storage of large amounts of typed data, but requires that the data shape +// and use be determined by the configuration files and parsers that are used to +// read and write this format. That is, the Example is mostly *not* a +// self-describing format. In TensorFlow, Examples are read in row-major +// format, so any configuration that describes data with rank-2 or above +// should keep this in mind. If you flatten a matrix into a FloatList it should +// be stored as [ row 0 ... row 1 ... row M-1 ] +// +// An Example for a movie recommendation application: +// features { +// feature { +// key: "age" +// value { float_list { +// value: 29.0 +// }} +// } +// feature { +// key: "movie" +// value { bytes_list { +// value: "The Shawshank Redemption" +// value: "Fight Club" +// }} +// } +// feature { +// key: "movie_ratings" +// value { float_list { +// value: 9.0 +// value: 9.7 +// }} +// } +// feature { +// key: "suggestion" +// value { bytes_list { +// value: "Inception" +// }} +// } +// # Note that this feature exists to be used as a label in training. +// # E.g., if training a logistic regression model to predict purchase +// # probability in our learning tool we would set the label feature to +// # "suggestion_purchased". +// feature { +// key: "suggestion_purchased" +// value { float_list { +// value: 1.0 +// }} +// } +// # Similar to "suggestion_purchased" above this feature exists to be used +// # as a label in training. +// # E.g., if training a linear regression model to predict purchase +// # price in our learning tool we would set the label feature to +// # "purchase_price". +// feature { +// key: "purchase_price" +// value { float_list { +// value: 9.99 +// }} +// } +// } +// +// A conformant Example data set obeys the following conventions: +// - If a Feature K exists in one example with data type T, it must be of +// type T in all other examples when present. It may be omitted. +// - The number of instances of Feature K list data may vary across examples, +// depending on the requirements of the model. +// - If a Feature K doesn't exist in an example, a K-specific default will be +// used, if configured. +// - If a Feature K exists in an example but contains no items, the intent +// is considered to be an empty tensor and no default will be used. + +message Example { + Features features = 1; +} + +// A SequenceExample is an Example representing one or more sequences, and +// some context. The context contains features which apply to the entire +// example. The feature_lists contain a key, value map where each key is +// associated with a repeated set of Features (a FeatureList). +// A FeatureList thus represents the values of a feature identified by its key +// over time / frames. +// +// Below is a SequenceExample for a movie recommendation application recording a +// sequence of ratings by a user. The time-independent features ("locale", +// "age", "favorites") describing the user are part of the context. The sequence +// of movies the user rated are part of the feature_lists. For each movie in the +// sequence we have information on its name and actors and the user's rating. +// This information is recorded in three separate feature_list(s). +// In the example below there are only two movies. All three feature_list(s), +// namely "movie_ratings", "movie_names", and "actors" have a feature value for +// both movies. Note, that "actors" is itself a bytes_list with multiple +// strings per movie. 
+// +// context: { +// feature: { +// key : "locale" +// value: { +// bytes_list: { +// value: [ "pt_BR" ] +// } +// } +// } +// feature: { +// key : "age" +// value: { +// float_list: { +// value: [ 19.0 ] +// } +// } +// } +// feature: { +// key : "favorites" +// value: { +// bytes_list: { +// value: [ "Majesty Rose", "Savannah Outen", "One Direction" ] +// } +// } +// } +// } +// feature_lists: { +// feature_list: { +// key : "movie_ratings" +// value: { +// feature: { +// float_list: { +// value: [ 4.5 ] +// } +// } +// feature: { +// float_list: { +// value: [ 5.0 ] +// } +// } +// } +// } +// feature_list: { +// key : "movie_names" +// value: { +// feature: { +// bytes_list: { +// value: [ "The Shawshank Redemption" ] +// } +// } +// feature: { +// bytes_list: { +// value: [ "Fight Club" ] +// } +// } +// } +// } +// feature_list: { +// key : "actors" +// value: { +// feature: { +// bytes_list: { +// value: [ "Tim Robbins", "Morgan Freeman" ] +// } +// } +// feature: { +// bytes_list: { +// value: [ "Brad Pitt", "Edward Norton", "Helena Bonham Carter" ] +// } +// } +// } +// } +// } +// +// A conformant SequenceExample data set obeys the following conventions: +// +// Context: +// - All conformant context features K must obey the same conventions as +// a conformant Example's features (see above). +// Feature lists: +// - A FeatureList L may be missing in an example; it is up to the +// parser configuration to determine if this is allowed or considered +// an empty list (zero length). +// - If a FeatureList L exists, it may be empty (zero length). +// - If a FeatureList L is non-empty, all features within the FeatureList +// must have the same data type T. Even across SequenceExamples, the type T +// of the FeatureList identified by the same key must be the same. An entry +// without any values may serve as an empty feature. +// - If a FeatureList L is non-empty, it is up to the parser configuration +// to determine if all features within the FeatureList must +// have the same size. The same holds for this FeatureList across multiple +// examples. +// - For sequence modeling, e.g.: +// http://colah.github.io/posts/2015-08-Understanding-LSTMs/ +// https://github.com/tensorflow/nmt +// the feature lists represent a sequence of frames. +// In this scenario, all FeatureLists in a SequenceExample have the same +// number of Feature messages, so that the ith element in each FeatureList +// is part of the ith frame (or time step). 
+// Examples of conformant and non-conformant examples' FeatureLists: +// +// Conformant FeatureLists: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// +// Non-conformant FeatureLists (mismatched types): +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { int64_list: { value: [ 5 ] } } } +// } } +// +// Conditionally conformant FeatureLists, the parser configuration determines +// if the feature sizes must match: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0, 6.0 ] } } } +// } } +// +// Conformant pair of SequenceExample +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } +// feature: { float_list: { value: [ 2.0 ] } } } +// } } +// +// Conformant pair of SequenceExample +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { } +// } } +// +// Conditionally conformant pair of SequenceExample, the parser configuration +// determines if the second feature_lists is consistent (zero-length) or +// invalid (missing "movie_ratings"): +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { } +// +// Non-conformant pair of SequenceExample (mismatched types) +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { int64_list: { value: [ 4 ] } } +// feature: { int64_list: { value: [ 5 ] } } +// feature: { int64_list: { value: [ 2 ] } } } +// } } +// +// Conditionally conformant pair of SequenceExample; the parser configuration +// determines if the feature sizes must match: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.5 ] } } +// feature: { float_list: { value: [ 5.0 ] } } } +// } } +// and: +// feature_lists: { feature_list: { +// key: "movie_ratings" +// value: { feature: { float_list: { value: [ 4.0 ] } } +// feature: { float_list: { value: [ 5.0, 3.0 ] } } +// } } + +message SequenceExample { + Features context = 1; + FeatureLists feature_lists = 2; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/example/example_parser_configuration.proto b/third_party/tensorflow_serving_protos/tensorflow/core/example/example_parser_configuration.proto new file mode 100644 index 0000000000..0af0f4b44c --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/example/example_parser_configuration.proto @@ -0,0 +1,40 @@ +// Protocol messages for describing the configuration of the ExampleParserOp. 
+ +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "ExampleParserConfigurationProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.example"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example/example_parser_configuration_go_proto"; + +message VarLenFeatureProto { + tensorflow.DataType dtype = 1; + string values_output_tensor_name = 2; + string indices_output_tensor_name = 3; + string shapes_output_tensor_name = 4; +} + +message FixedLenFeatureProto { + tensorflow.DataType dtype = 1; + tensorflow.TensorShapeProto shape = 2; + tensorflow.TensorProto default_value = 3; + string values_output_tensor_name = 4; +} + +message FeatureConfiguration { + oneof config { + FixedLenFeatureProto fixed_len_feature = 1; + VarLenFeatureProto var_len_feature = 2; + } +} + +message ExampleParserConfiguration { + map feature_map = 1; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/example/feature.proto b/third_party/tensorflow_serving_protos/tensorflow/core/example/feature.proto new file mode 100644 index 0000000000..7f9fad9823 --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/example/feature.proto @@ -0,0 +1,110 @@ +// Protocol messages for describing features for machine learning model +// training or inference. +// +// There are three base Feature types: +// - bytes +// - float +// - int64 +// +// A Feature contains Lists which may hold zero or more values. These +// lists are the base values BytesList, FloatList, Int64List. +// +// Features are organized into categories by name. The Features message +// contains the mapping from name to Feature. +// +// Example Features for a movie recommendation application: +// feature { +// key: "age" +// value { float_list { +// value: 29.0 +// }} +// } +// feature { +// key: "movie" +// value { bytes_list { +// value: "The Shawshank Redemption" +// value: "Fight Club" +// }} +// } +// feature { +// key: "movie_ratings" +// value { float_list { +// value: 9.0 +// value: 9.7 +// }} +// } +// feature { +// key: "suggestion" +// value { bytes_list { +// value: "Inception" +// }} +// } +// feature { +// key: "suggestion_purchased" +// value { int64_list { +// value: 1 +// }} +// } +// feature { +// key: "purchase_price" +// value { float_list { +// value: 9.99 +// }} +// } +// + +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "FeatureProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.example"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto"; + +// LINT.IfChange +// Containers to hold repeated fundamental values. +message BytesList { + repeated bytes value = 1; +} +message FloatList { + repeated float value = 1 [packed = true]; +} +message Int64List { + repeated int64 value = 1 [packed = true]; +} + +// Containers for non-sequential data. +message Feature { + // Each feature can be exactly one kind. + oneof kind { + BytesList bytes_list = 1; + FloatList float_list = 2; + Int64List int64_list = 3; + } +} + +message Features { + // Map from feature name to feature. + map feature = 1; +} + +// Containers for sequential data. +// +// A FeatureList contains lists of Features. 
These may hold zero or more +// Feature values. +// +// FeatureLists are organized into categories by name. The FeatureLists message +// contains the mapping from name to FeatureList. +// +message FeatureList { + repeated Feature feature = 1; +} + +message FeatureLists { + // Map from feature name to feature list. + map<string, FeatureList> feature_list = 1; +} +// LINT.ThenChange( +// https://www.tensorflow.org/code/tensorflow/python/training/training.py) diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/allocation_description.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/allocation_description.proto new file mode 100644 index 0000000000..f18caa40b2 --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/allocation_description.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "AllocationDescriptionProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/allocation_description_go_proto"; + +message AllocationDescription { + // Total number of bytes requested + int64 requested_bytes = 1; + + // Total number of bytes allocated if known + int64 allocated_bytes = 2; + + // Name of the allocator used + string allocator_name = 3; + + // Identifier of the allocated buffer if known + int64 allocation_id = 4; + + // Set if this tensor only has one remaining reference + bool has_single_reference = 5; + + // Address of the allocation. + uint64 ptr = 6; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/api_def.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/api_def.proto new file mode 100644 index 0000000000..1823ce64f2 --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/api_def.proto @@ -0,0 +1,138 @@ +// Defines the text format for including per-op API definition and +// overrides for client language op code generators. + +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/attr_value.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "ApiDefProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/api_def_go_proto"; + +// Used to specify and override the default API & behavior in the +// generated code for client languages, from what you would get from +// the OpDef alone. There will be a set of ApiDefs that are common +// to all client languages, and another set per client language. +// The per-client-language ApiDefs will inherit values from the +// common ApiDefs which it can either replace or modify. +// +// We separate the API definition from the OpDef so we can evolve the +// API while remaining backwards compatible when interpreting old +// graphs. Overrides go in an "api_def.pbtxt" file with a text-format +// ApiDefs message. +// +// WARNING: Be *very* careful changing the API for any existing op -- +// you can change the semantics of existing code. These changes may +// need to wait until a major release of TensorFlow to avoid breaking +// our compatibility promises. +message ApiDef { + // Name of the op (in the OpDef) to specify the API for.
+ string graph_op_name = 1; + // If this op is deprecated, set deprecation message to the message + // that should be logged when this op is used. + // The message should indicate alternative op to use, if any. + string deprecation_message = 12; + // Major version when the op will be deleted. For e.g. set this + // value to 2 if op API should be removed in TensorFlow 2.0 and + // deprecated in versions before that. + int32 deprecation_version = 13; + + enum Visibility { + // Normally this is "VISIBLE" unless you are inheriting a + // different value from another ApiDef. + DEFAULT_VISIBILITY = 0; + // Publicly visible in the API. + VISIBLE = 1; + // Do not include this op in the generated API. If visibility is + // set to 'SKIP', other fields are ignored for this op. + SKIP = 2; + // Hide this op by putting it into an internal namespace (or whatever + // is appropriate in the target language). + HIDDEN = 3; + } + Visibility visibility = 2; + + // If you specify any endpoint, this will replace all of the + // inherited endpoints. The first endpoint should be the + // "canonical" endpoint, and should not be deprecated (unless all + // endpoints are deprecated). + message Endpoint { + // Name should be either like "CamelCaseName" or + // "Package.CamelCaseName". Client-language-specific ApiDefs may + // use a snake_case convention instead of CamelCase. + string name = 1; + + // Set if this endpoint is deprecated. If set to true, a message suggesting + // to use a non-deprecated endpoint instead will be printed. If all + // endpoints are deprecated, set deprecation_message in ApiDef instead. + bool deprecated = 3; + + // Major version when an endpoint will be deleted. For e.g. set this + // value to 2 if endpoint should be removed in TensorFlow 2.0 and + // deprecated in versions before that. + int32 deprecation_version = 4; + } + repeated Endpoint endpoint = 3; + + message Arg { + string name = 1; + + // Change the name used to access this arg in the API from what + // is used in the GraphDef. Note that these names in `backticks` + // will also be replaced in the summary & description fields. + string rename_to = 2; + + // Note: this will replace any inherited arg doc. There is no + // current way of modifying arg descriptions (other than replacing + // them entirely) as can be done with op descriptions. + string description = 3; + } + repeated Arg in_arg = 4; + repeated Arg out_arg = 5; + // List of original in_arg names to specify new argument order. + // Length of arg_order should be either empty to keep current order + // or match size of in_arg. + repeated string arg_order = 11; + + // Description of the graph-construction-time configuration of this + // Op. That is to say, this describes the attr fields that will + // be specified in the NodeDef. + message Attr { + string name = 1; + + // Change the name used to access this attr in the API from what + // is used in the GraphDef. Note that these names in `backticks` + // will also be replaced in the summary & description fields. + string rename_to = 2; + + // Specify a new default value to use for this attr. This default + // will be used when creating new graphs, as opposed to the + // default in the OpDef, which will be used when interpreting old + // GraphDefs. + AttrValue default_value = 3; + + // Note: this will replace any inherited attr doc, there is no current + // way of modifying attr descriptions as can be done with op descriptions. 
+ string description = 4; + } + repeated Attr attr = 6; + + // One-line human-readable description of what the Op does. + string summary = 7; + + // Additional, longer human-readable description of what the Op does. + string description = 8; + + // Modify an existing/inherited description by adding text to the beginning + // or end. + string description_prefix = 9; + string description_suffix = 10; +} + +message ApiDefs { + repeated ApiDef op = 1; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/attr_value.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/attr_value.proto new file mode 100644 index 0000000000..2bd5b552a3 --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/attr_value.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "AttrValueProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/attr_value_go_proto"; + +// Protocol buffer representing the value for an attr used to configure an Op. +// Comment indicates the corresponding attr type. Only the field matching the +// attr type may be filled. +message AttrValue { + // LINT.IfChange + message ListValue { + repeated bytes s = 2; // "list(string)" + repeated int64 i = 3 [packed = true]; // "list(int)" + repeated float f = 4 [packed = true]; // "list(float)" + repeated bool b = 5 [packed = true]; // "list(bool)" + repeated DataType type = 6 [packed = true]; // "list(type)" + repeated TensorShapeProto shape = 7; // "list(shape)" + repeated TensorProto tensor = 8; // "list(tensor)" + repeated NameAttrList func = 9; // "list(attr)" + } + // LINT.ThenChange(//tensorflow/c/c_api.cc) + + oneof value { + bytes s = 2; // "string" + int64 i = 3; // "int" + float f = 4; // "float" + bool b = 5; // "bool" + DataType type = 6; // "type" + TensorShapeProto shape = 7; // "shape" + TensorProto tensor = 8; // "tensor" + ListValue list = 1; // any "list(...)" + + // "func" represents a function. func.name is a function's name or + // a primitive op's name. func.attr.first is the name of an attr + // defined for that function. func.attr.second is the value for + // that attr in the instantiation. + NameAttrList func = 10; + + // This is a placeholder only used in nodes defined inside a + // function. It indicates the attr value will be supplied when + // the function is instantiated. For example, let us suppose a + // node "N" in function "FN". "N" has an attr "A" with value + // placeholder = "foo". When FN is instantiated with attr "foo" + // set to "bar", the instantiated node N's attr A will have been + // given the value "bar". + string placeholder = 9; + } +} + +// A list of attr names and their values. The whole list is attached +// with a string name. E.g., MatMul[T=float]. 
+message NameAttrList { + string name = 1; + map<string, AttrValue> attr = 2; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/cost_graph.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/cost_graph.proto new file mode 100644 index 0000000000..42c9e23cfa --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/cost_graph.proto @@ -0,0 +1,89 @@ +syntax = "proto3"; + +package tensorflow; + +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +option cc_enable_arenas = true; +option java_outer_classname = "CostGraphProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/cost_graph_go_proto"; + +message CostGraphDef { + message Node { + // The name of the node. Names are globally unique. + string name = 1; + + // The device of the node. Can be empty if the node is mapped to the + // default partition or partitioning hasn't been run yet. + string device = 2; + + // The id of the node. Node ids are only unique inside a partition. + int32 id = 3; + + // Inputs of this node. They must be executed before this node can be + // executed. An input is a particular output of another node, specified + // by the node id and the output index. + message InputInfo { + int32 preceding_node = 1; + int32 preceding_port = 2; + } + repeated InputInfo input_info = 4; + + // Outputs of this node. + message OutputInfo { + int64 size = 1; + // If >= 0, the output is an alias of an input. Note that an alias input + // may itself be an alias. The algorithm will therefore need to follow + // those pointers. + int64 alias_input_port = 2; + TensorShapeProto shape = 3; + DataType dtype = 4; + } + repeated OutputInfo output_info = 5; + + // Temporary memory used by this node. + int64 temporary_memory_size = 6; + + // Persistent memory used by this node. + int64 persistent_memory_size = 12; + + int64 host_temp_memory_size = 10 [deprecated = true]; + int64 device_temp_memory_size = 11 [deprecated = true]; + int64 device_persistent_memory_size = 16 [deprecated = true]; + + // Estimate of the computational cost of this node, in microseconds. + int64 compute_cost = 9; + + // Analytical estimate of the computational cost of this node, in + // microseconds. + int64 compute_time = 14; + + // Analytical estimate of the memory access cost of this node, in + // microseconds. + int64 memory_time = 15; + + // If true, the output is permanent: it can't be discarded, because this + // node is part of the "final output". Nodes may depend on final nodes. + bool is_final = 7; + + // Ids of the control inputs for this node. + repeated int32 control_input = 8; + + // Are the costs inaccurate? + bool inaccurate = 17; + } + repeated Node node = 1; + + // Total cost of this graph, typically used for balancing decisions. + message AggregatedCost { + // Aggregated cost value. + float cost = 1; + + // Aggregated cost dimension (e.g. 'memory', 'compute', 'network').
+ string dimension = 2; + } + repeated AggregatedCost cost = 2; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/cpp_shape_inference.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/cpp_shape_inference.proto new file mode 100644 index 0000000000..4cdbf5dd5c --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/cpp_shape_inference.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package tensorflow.core; + +import "tensorflow/core/framework/full_type.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +option cc_enable_arenas = true; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/python/framework/cpp_shape_inference_go_proto"; + +message CppShapeInferenceResult { + message HandleShapeAndType { + reserved 3; + + TensorShapeProto shape = 1; + DataType dtype = 2; + FullTypeDef type = 4; + } + message HandleData { + bool is_set = 1; + + // Only valid if <is_set>. + repeated HandleShapeAndType shape_and_type = 2; + } + TensorShapeProto shape = 1; + + reserved 2; // was handle_shape + reserved 3; // was handle_dtype + HandleData handle_data = 4; +} + +message CppShapeInferenceInputsNeeded { + repeated int32 input_tensors_needed = 1; + repeated int32 input_tensors_as_shapes_needed = 2; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset.proto new file mode 100644 index 0000000000..9dfd03b2dd --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package tensorflow.data; + +import "tensorflow/core/framework/tensor.proto"; +import "tensorflow/core/framework/tensor_shape.proto"; +import "tensorflow/core/framework/types.proto"; + +option cc_enable_arenas = true; + +// This file contains protocol buffers for working with tf.data Datasets. + +// Metadata describing a compressed component of a dataset element. +message CompressedComponentMetadata { + // The dtype of the component tensor. + .tensorflow.DataType dtype = 1; + + // The shape of the component tensor. + .tensorflow.TensorShapeProto tensor_shape = 2; + + // The amount of uncompressed tensor data. + // - For string tensors, there is an element for each string indicating the + // size of the string. + // - For all other tensors, there is a single element indicating the size of + // the tensor. + repeated uint64 uncompressed_bytes = 4; + + reserved 3; +} + +message CompressedElement { + // Compressed tensor bytes for all components of the element. + bytes data = 1; + // Metadata for the components of the element. + repeated CompressedComponentMetadata component_metadata = 2; + // Version of the CompressedElement. CompressedElements may be stored on disk + // and read back by later versions of code, so we store a version number to + // help readers understand which version they are reading. When you add a new + // field to this proto, you need to increment kCompressedElementVersion in + // tensorflow/core/data/compression_utils.cc. + int32 version = 3; +} + +// An uncompressed dataset element.
+message UncompressedElement { + repeated TensorProto components = 1; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_metadata.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_metadata.proto new file mode 100644 index 0000000000..0e667dd48d --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_metadata.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; + +package tensorflow.data; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_metadata_go_proto"; + +// next: 2 +message Metadata { + bytes name = 1; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_options.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_options.proto new file mode 100644 index 0000000000..e76fcd471a --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/dataset_options.proto @@ -0,0 +1,262 @@ +syntax = "proto3"; + +package tensorflow.data; + +import "tensorflow/core/framework/model.proto"; + +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_options_go_proto"; + +// Represents the type of auto-sharding we enable. +enum AutoShardPolicy { + // AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding. + AUTO = 0; + // FILE: Shards by input files (i.e. each worker will get a set of files to + // process). When this option is selected, make sure that there is at least as + // many files as workers. If there are fewer input files than workers, a + // runtime error will be raised. + FILE = 1; + // DATA: Shards by elements produced by the dataset. Each worker will process + // the whole dataset and discard the portion that is not for itself. Note that + // for this mode to correctly partitions the dataset elements, the dataset + // needs to produce elements in a deterministic order. + DATA = 2; + // HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated + // as a placeholder to replace with `shard(num_workers, worker_index)`. + HINT = 3; + // OFF: No sharding will be performed. + OFF = -1; +} + +// next: 6 +message AutotuneOptions { + // Whether to automatically tune performance knobs. + oneof optional_enabled { + bool enabled = 1; + } + // When autotuning is enabled (through autotune), determines the CPU budget to + // use. Values greater than the number of schedulable CPU cores are allowed + // but may result in CPU contention. + oneof optional_cpu_budget { + int32 cpu_budget = 2; + } + // When autotuning is enabled (through autotune), determines the RAM budget to + // use. Values greater than the available RAM in bytes may result in OOM. If + // 0, defaults to half of the available RAM in bytes. + oneof optional_ram_budget { + int64 ram_budget = 3; + } + + // When autotuning is enabled (through autotune), determines the algorithm to + // use. If not explicitly set by user, autotuning will follow HILL_CLIMB + // algorithm but has more flexibility to tune parameters more aggressively, + // in which case the behavior is implementation specific and may change over + // time. + oneof optional_autotune_algorithm { + model.AutotuneAlgorithm autotune_algorithm = 4; + } + + // The initial parallelism to use for parallel transformations before autotune + // has a chance to run. A higher value can help with quick startup, but may + // cause the ram_budget to temporarily be exceeded. 
Memory-sensitive datasets + // should consider setting this to `1` to avoid running out of memory. + // Defaults to 16. + oneof optional_initial_parallelism { + int64 initial_parallelism = 5; + } +} + +// next: 2 +message CardinalityOptions { + enum ComputeLevel { + CARDINALITY_COMPUTE_UNSPECIFIED = 0; + // Cardinality will only be computed if it can be determined in a cheap + // manner (ie. without reading from file sources). If the cardinality would + // be nontrivial to compute, Cardinality() will return UNKNOWN_CARDINALITY. + CARDINALITY_COMPUTE_LOW = 1; + // Moderate effort will be made to determine cardinality, such as reading + // index data from source files. If significant work is needed to compute + // cardinality (e.g. reading entire source file contents or executing user + // defined functions), Cardinality() will return UNKNOWN_CARDINALITY. + CARDINALITY_COMPUTE_MODERATE = 2; + } + ComputeLevel compute_level = 1; +} + +// next: 3 +message DistributeOptions { + AutoShardPolicy auto_shard_policy = 1; + // The number of devices attached to this input pipeline. + oneof optional_num_devices { + int32 num_devices = 2; + } +} + +// next: 22 +message OptimizationOptions { + // Whether to apply default graph optimizations. If False, only graph + // optimizations that have been explicitly enabled will be applied. + oneof optional_apply_default_optimizations { + bool apply_default_optimizations = 1; + } + reserved 2; + reserved 3; + reserved 4; + reserved 5; + // Whether to fuse filter transformations. + oneof optional_filter_fusion { + bool filter_fusion = 6; + } + // NOTE: field id 7 deleted in June 2021. + reserved 7; + // NOTE: field id 8 deleted in June 2021. + reserved 8; + // Whether to fuse map and batch transformations. + oneof optional_map_and_batch_fusion { + bool map_and_batch_fusion = 9; + } + // Whether to fuse map and filter transformations. + oneof optional_map_and_filter_fusion { + bool map_and_filter_fusion = 10; + } + // Whether to fuse map transformations. + oneof optional_map_fusion { + bool map_fusion = 11; + } + // Whether to parallelize stateless map transformations. + oneof optional_map_parallelization { + bool map_parallelization = 12; + } + + // NOTE: field id 13 deleted in June 2021. + reserved 13; + + // Whether to eliminate no-op transformations. + oneof optional_noop_elimination { + bool noop_elimination = 14; + } + // Whether to parallelize copying of batch elements. This optimization is + // highly experimental and can cause performance degradation (e.g. when the + // parallelization overhead exceeds the benefits of performing the data copies + // in parallel). You should only enable this optimization if a) your input + // pipeline is bottlenecked on batching and b) you have validated that this + // optimization improves performance. + oneof optional_parallel_batch { + bool parallel_batch = 15; + } + // Field id 16 was removed in 06/2021. + reserved 16; + // Whether to fuse shuffle and repeat transformations. + oneof optional_shuffle_and_repeat_fusion { + bool shuffle_and_repeat_fusion = 17; + } + // Whether to parallelize stateless filter transformations. + oneof optional_filter_parallelization { + bool filter_parallelization = 18; + } + // Whether to inject 'Prefetch' as the last transformation. Only takes effect + // if the last transformation is synchronous; otherwise does nothing. + oneof optional_inject_prefetch { + bool inject_prefetch = 19; + } + // NOTE: field id 20 was removed in August 2023. 
+ reserved 20; + // Whether to replace parallel interleave with interleave and prefetch. Only + // takes effect if the parallel interleave is deterministic; otherwise does + // nothing. + oneof optional_seq_interleave_prefetch { + bool seq_interleave_prefetch = 21; + } +} + +// next: 2 +message ServiceOptions { + // If true, the tf.data service client allocates data to pinned memory, which + // faciliates more efficient copying from host memory to GPU memory + // downstream. + // - For gRPC, compression must be disabled for this to take effect. + // - For alternative data transfer protocols, this may or may not take effect, + // depending on the implementation. + oneof optional_pinned { + bool pinned = 1; + } +} + +// next: 3 +message ThreadingOptions { + // If set, it overrides the maximum degree of intra-op parallelism. + oneof optional_max_intra_op_parallelism { + int32 max_intra_op_parallelism = 1; + } + // If set, the dataset will use a private threadpool of the given size. + oneof optional_private_threadpool_size { + int32 private_threadpool_size = 2; + } +} + +// Represents how to handle external state during serialization. +enum ExternalStatePolicy { + POLICY_WARN = 0; + POLICY_IGNORE = 1; + POLICY_FAIL = 2; +} + +// Message stored with Dataset objects to control how datasets are processed and +// optimized. +// +// next: 13 +message Options { + // Optional name for the dataset. + oneof optional_dataset_name { + string dataset_name = 10; + } + // List of frameworks used to generate this dataset. + repeated string framework_type = 11; + // Whether the outputs need to be produced in deterministic order. + oneof optional_deterministic { + bool deterministic = 1; + } + // The autotune options associated with the dataset. + AutotuneOptions autotune_options = 7; + // The distribution strategy options associated with the dataset. + DistributeOptions distribute_options = 2; + // The optimization options associated with the dataset. + OptimizationOptions optimization_options = 3; + // The tf.data service options associated with the dataset. + ServiceOptions service_options = 12; + // Whether to introduce 'slack' in the last `prefetch` of the input pipeline, + // if it exists. This may reduce CPU contention with accelerator host-side + // activity at the start of a step. The slack frequency is determined by the + // number of devices attached to this input pipeline. + oneof optional_slack { + bool slack = 4; + } + // The threading options associated with the dataset. + ThreadingOptions threading_options = 5; + // This option can be used to override the default policy for how to handle + // external state when serializing a dataset or checkpointing its iterator. + // There are three settings available - IGNORE: External state is ignored + // without a warning; WARN: External state is ignored and a warning is logged; + // FAIL: External state results in an error. + oneof optional_external_state_policy { + ExternalStatePolicy external_state_policy = 6; + } + // This option indicates whether to checkpoint input pipeline state + // "explicitly", by storing the internal state of iterators for each + // tf.data operation, (the default), or "symbolically", by storing metadata + // that captures the state of each tf.data operation at the time it processed + // the last data seen by tf.data consumer. + // + // Symbolic checkpoints are expected to be much smaller but not all tf.data + // operations are compatible with symbolic checkpointing. 
In particular, + // symbolic checkpointing requires that data is processed in-order and + // operations that reorder elements, such as `shuffle()` or non-deterministic + // `map()`, are not compatible with symbolic checkpointing. + oneof optional_symbolic_checkpoint { + bool symbolic_checkpoint = 8; + } + // Whether to start background threads of asynchronous transformations upon + // iterator creation (as opposed to upon first call to `GetNext`). + oneof optional_warm_start { + bool warm_start = 9; + } +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/device_attributes.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/device_attributes.proto new file mode 100644 index 0000000000..5f568e255f --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/device_attributes.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "DeviceAttributesProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/device_attributes_go_proto"; + +message InterconnectLink { + int32 device_id = 1; + string type = 2; + int32 strength = 3; +} + +message LocalLinks { + repeated InterconnectLink link = 1; +} + +message DeviceLocality { + // Optional bus locality of device. Default value of 0 means + // no specific locality. Specific localities are indexed from 1. + int32 bus_id = 1; + + // Optional NUMA locality of device. + int32 numa_node = 2; + + // Optional local interconnect links to other devices. + LocalLinks links = 3; +} + +message DeviceAttributes { + // Fully specified name of the device within a cluster. + string name = 1; + + // String representation of device_type. + string device_type = 2; + + // Memory capacity of device in bytes. + int64 memory_limit = 4; + + // Platform-specific data about device that may be useful + // for supporting efficient data transfers. + DeviceLocality locality = 5; + + // A device is assigned a global unique number each time it is + // initialized. "incarnation" should never be 0. + fixed64 incarnation = 6; + + // String representation of the physical device that this device maps to. + string physical_device_desc = 7; + + // A physical device ID for use in XLA DeviceAssignments, unique across + // clients in a multi-client setup. Set to -1 if unavailable, non-negative + // otherwise. + int64 xla_global_id = 8; +} diff --git a/third_party/tensorflow_serving_protos/tensorflow/core/framework/full_type.proto b/third_party/tensorflow_serving_protos/tensorflow/core/framework/full_type.proto new file mode 100644 index 0000000000..19e8da5ab7 --- /dev/null +++ b/third_party/tensorflow_serving_protos/tensorflow/core/framework/full_type.proto @@ -0,0 +1,310 @@ +syntax = "proto3"; + +package tensorflow; + +option cc_enable_arenas = true; +option java_outer_classname = "FullTypeProtos"; +option java_multiple_files = true; +option java_package = "org.tensorflow.framework"; +option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/full_type_go_proto"; + +// LINT.IfChange +// Experimental. Represents the complete type information of a TensorFlow value. +enum FullTypeId { + // The default represents an uninitialized values. + TFT_UNSET = 0; + + // Type symbols. Used to construct more complex type expressions like + // algebraic data types. 
+
+  // Type variables may serve as placeholder for any other type ID in type
+  // templates.
+  //
+  // Examples:
+  //   TFT_DATASET[TFT_VAR["T"]] is a Dataset returning a type indicated by "T".
+  //   TFT_TENSOR[TFT_VAR["T"]] is a Tensor of an element type indicated by "T".
+  //   TFT_TENSOR[TFT_VAR["T"]], TFT_TENSOR[TFT_VAR["T"]] are two tensors of
+  //   identical element types.
+  //   TFT_TENSOR[TFT_VAR["P"]], TFT_TENSOR[TFT_VAR["Q"]] are two tensors of
+  //   independent element types.
+  //
+  TFT_VAR = 1;
+
+  // Wildcard type. Describes a parameter of unknown type. In TensorFlow, that
+  // can mean either a "Top" type (accepts any type), or a dynamically typed
+  // object whose type is unknown in context.
+  // Important: "unknown" does not necessarily mean undeterminable!
+  TFT_ANY = 2;
+
+  // The algebraic product type. This is an algebraic type that may be used just
+  // for logical grouping. Not to be confused with TFT_TUPLE which describes a
+  // concrete object of several elements.
+  //
+  // Example:
+  //   TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT64]]]
+  //   is a Dataset producing two tensors, an integer one and a float one.
+  //
+  TFT_PRODUCT = 3;
+
+  // Represents a named field, with the name stored in the attribute.
+  //
+  // Parametrization:
+  //   TFT_NAMED[<t>]{<name>}
+  //   * <t> is the type of the field
+  //   * <name> is the field name, as string (though can theoretically be an
+  //     int as well)
+  //
+  // Example:
+  //   TFT_RECORD[
+  //     TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'},
+  //     TFT_NAMED[TFT_TENSOR[TFT_FLOAT32]]{'bar'},
+  //   ]
+  //   is a structure with two fields, an int tensor "foo" and a float tensor
+  //   "bar".
+  TFT_NAMED = 4;
+
+  // Template definition. Expands the variables by repeating a template as
+  // arguments of a container.
+  //
+  // Parametrization:
+  //   TFT_FOR_EACH[,