From cddea179de74cf1c679371517583a608c206d9ee Mon Sep 17 00:00:00 2001
From: Tadayoshi Sato <sato.tadayoshi@gmail.com>
Date: Wed, 20 Nov 2024 19:39:34 +0900
Subject: [PATCH] wip - Initial commit

---
 .editorconfig                                 |   16 +
 .github/dependabot.yml                        |    6 +
 .github/workflows/test.yml                    |   37 +
 .gitignore                                    |   10 +
 .mvn/wrapper/maven-wrapper.properties         |   19 +
 LICENSE                                       |  201 ++++
 README.md                                     |  233 ++++
 examples/describe_model.java                  |   24 +
 examples/list_models.java                     |   22 +
 examples/metrics.java                         |   22 +
 examples/mnist.java                           |   31 +
 examples/register_mnist.java                  |   32 +
 examples/set_default.java                     |   24 +
 examples/token_auth.java                      |   30 +
 examples/unregister_mnist.java                |   25 +
 mvnw                                          |  259 +++++
 mvnw.cmd                                      |  149 +++
 pom.xml                                       |  166 +++
 .../serving/client/Configuration.java         |   65 ++
 .../serving/client/TensorFlowServingApi.java  |   20 +
 .../client/TensorFlowServingClient.java       |   86 ++
 src/main/proto/google/protobuf/any.proto      |  162 +++
 src/main/proto/google/protobuf/wrappers.proto |  123 ++
 .../tensorflow/core/example/example.proto     |  301 +++++
 .../tensorflow/core/example/feature.proto     |  110 ++
 .../framework/allocation_description.proto    |   29 +
 .../core/framework/attr_value.proto           |   64 ++
 .../core/framework/cost_graph.proto           |   89 ++
 .../tensorflow/core/framework/full_type.proto |  310 +++++
 .../tensorflow/core/framework/function.proto  |  136 +++
 .../tensorflow/core/framework/graph.proto     |   60 +
 .../core/framework/graph_debug_info.proto     |   61 +
 .../tensorflow/core/framework/node_def.proto  |   95 ++
 .../tensorflow/core/framework/op_def.proto    |  193 ++++
 .../core/framework/resource_handle.proto      |   47 +
 .../core/framework/step_stats.proto           |   88 ++
 .../tensorflow/core/framework/tensor.proto    |  101 ++
 .../core/framework/tensor_description.proto   |   24 +
 .../core/framework/tensor_shape.proto         |   46 +
 .../tensorflow/core/framework/types.proto     |  100 ++
 .../tensorflow/core/framework/variable.proto  |   84 ++
 .../tensorflow/core/framework/versions.proto  |   33 +
 .../tensorflow/core/protobuf/cluster.proto    |   87 ++
 .../tensorflow/core/protobuf/config.proto     | 1015 +++++++++++++++++
 .../tensorflow/core/protobuf/debug.proto      |   94 ++
 .../core/protobuf/error_codes.proto           |   11 +
 .../tensorflow/core/protobuf/meta_graph.proto |  286 +++++
 .../core/protobuf/named_tensor.proto          |   25 +
 .../core/protobuf/rewriter_config.proto       |  241 ++++
 .../core/protobuf/rpc_options.proto           |    7 +
 .../core/protobuf/saved_object_graph.proto    |  251 ++++
 .../tensorflow/core/protobuf/saver.proto      |   48 +
 .../tensorflow/core/protobuf/struct.proto     |  164 +++
 .../protobuf/trackable_object_graph.proto     |   80 ++
 .../core/protobuf/verifier_config.proto       |   27 +
 .../apis/classification.proto                 |   48 +
 .../apis/get_model_metadata.proto             |   30 +
 .../apis/get_model_status.proto               |   68 ++
 .../tensorflow_serving/apis/inference.proto   |   59 +
 .../proto/tensorflow_serving/apis/input.proto |   82 ++
 .../tensorflow_serving/apis/logging.proto     |   20 +
 .../proto/tensorflow_serving/apis/model.proto |   36 +
 .../apis/model_management.proto               |   25 +
 .../apis/model_service.proto                  |   24 +
 .../tensorflow_serving/apis/predict.proto     |  191 ++++
 .../apis/prediction_log.proto                 |   55 +
 .../apis/prediction_service.proto             |   31 +
 .../tensorflow_serving/apis/regression.proto  |   37 +
 .../apis/session_service.proto                |   56 +
 .../tensorflow_serving/apis/status.proto      |   17 +
 .../file_system_storage_path_source.proto     |   83 ++
 .../config/log_collector_config.proto         |   12 +
 .../config/logging_config.proto               |   29 +
 .../config/model_server_config.proto          |   85 ++
 .../config/monitoring_config.proto            |   19 +
 .../config/platform_config.proto              |   19 +
 .../config/ssl_config.proto                   |   16 +
 .../tsl/protobuf/coordination_config.proto    |   78 ++
 .../proto/xla/tsl/protobuf/error_codes.proto  |  155 +++
 .../proto/xla/tsl/protobuf/rpc_options.proto  |   41 +
 .../serving/client/ConfigurationTest.java     |   34 +
 .../client/TensorFlowServingClientTest.java   |   30 +
 .../client/TensorFlowServingTestSupport.java  |   31 +
 src/test/resources/data/0.png                 |  Bin 0 -> 272 bytes
 src/test/resources/data/1.png                 |  Bin 0 -> 181 bytes
 src/test/resources/data/kitten.jpg            |  Bin 0 -> 16016 bytes
 src/test/resources/log4j2.properties          |   17 +
 .../00000123/assets/foo.txt                   |    1 +
 .../00000123/saved_model.pb                   |  Bin 0 -> 12107 bytes
 .../variables/variables.data-00000-of-00001   |  Bin 0 -> 20 bytes
 .../00000123/variables/variables.index        |  Bin 0 -> 172 bytes
 src/test/resources/tfsc4j.properties          |    2 +
 92 files changed, 7500 insertions(+)
 create mode 100644 .editorconfig
 create mode 100644 .github/dependabot.yml
 create mode 100644 .github/workflows/test.yml
 create mode 100644 .gitignore
 create mode 100644 .mvn/wrapper/maven-wrapper.properties
 create mode 100644 LICENSE
 create mode 100644 README.md
 create mode 100755 examples/describe_model.java
 create mode 100755 examples/list_models.java
 create mode 100755 examples/metrics.java
 create mode 100755 examples/mnist.java
 create mode 100755 examples/register_mnist.java
 create mode 100755 examples/set_default.java
 create mode 100755 examples/token_auth.java
 create mode 100755 examples/unregister_mnist.java
 create mode 100755 mvnw
 create mode 100644 mvnw.cmd
 create mode 100644 pom.xml
 create mode 100644 src/main/java/com/github/tadayosi/tensorflow/serving/client/Configuration.java
 create mode 100644 src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingApi.java
 create mode 100644 src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClient.java
 create mode 100644 src/main/proto/google/protobuf/any.proto
 create mode 100644 src/main/proto/google/protobuf/wrappers.proto
 create mode 100644 src/main/proto/tensorflow/core/example/example.proto
 create mode 100644 src/main/proto/tensorflow/core/example/feature.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/allocation_description.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/attr_value.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/cost_graph.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/full_type.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/function.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/graph.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/graph_debug_info.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/node_def.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/op_def.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/resource_handle.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/step_stats.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/tensor.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/tensor_description.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/tensor_shape.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/types.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/variable.proto
 create mode 100644 src/main/proto/tensorflow/core/framework/versions.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/cluster.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/config.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/debug.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/error_codes.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/meta_graph.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/named_tensor.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/rewriter_config.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/rpc_options.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/saved_object_graph.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/saver.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/struct.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/trackable_object_graph.proto
 create mode 100644 src/main/proto/tensorflow/core/protobuf/verifier_config.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/classification.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/get_model_metadata.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/get_model_status.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/inference.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/input.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/logging.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/model.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/model_management.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/model_service.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/predict.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/prediction_log.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/prediction_service.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/regression.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/session_service.proto
 create mode 100644 src/main/proto/tensorflow_serving/apis/status.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/file_system_storage_path_source.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/log_collector_config.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/logging_config.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/model_server_config.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/monitoring_config.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/platform_config.proto
 create mode 100644 src/main/proto/tensorflow_serving/config/ssl_config.proto
 create mode 100644 src/main/proto/xla/tsl/protobuf/coordination_config.proto
 create mode 100644 src/main/proto/xla/tsl/protobuf/error_codes.proto
 create mode 100644 src/main/proto/xla/tsl/protobuf/rpc_options.proto
 create mode 100644 src/test/java/com/github/tadayosi/tensorflow/serving/client/ConfigurationTest.java
 create mode 100644 src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClientTest.java
 create mode 100644 src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingTestSupport.java
 create mode 100644 src/test/resources/data/0.png
 create mode 100644 src/test/resources/data/1.png
 create mode 100644 src/test/resources/data/kitten.jpg
 create mode 100644 src/test/resources/log4j2.properties
 create mode 100644 src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
 create mode 100644 src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/saved_model.pb
 create mode 100644 src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.data-00000-of-00001
 create mode 100644 src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.index
 create mode 100644 src/test/resources/tfsc4j.properties

diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..e567d42
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,16 @@
+root = true
+
+[*]
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+
+[*.{yml,yaml}]
+indent_size = 2
+
+[*.json]
+indent_size = 2
+
+[*.md]
+indent_size = 2
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..b52e6a4
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,6 @@
+version: 2
+updates:
+  - package-ecosystem: maven
+    directory: '/'
+    schedule:
+      interval: monthly
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..c53d312
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,37 @@
+name: Test
+
+on:
+  pull_request:
+    branches:
+      - main
+    paths-ignore:
+      - "**.md"
+  push:
+    branches:
+      - main
+    paths-ignore:
+      - "**.md"
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        java: [ '17', '21' ]
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Set up Java
+        uses: actions/setup-java@v4
+        with:
+          distribution: 'temurin'
+          java-version: ${{ matrix.java }}
+          cache: 'maven'
+      - name: Build & test
+        run: |
+          mvn --batch-mode --no-transfer-progress install
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2a40a20
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+.*
+!.gitignore
+!.editorconfig
+
+target/
+/data
+
+# IDE
+.idea/
+.vscode/
diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties
new file mode 100644
index 0000000..f95f1ee
--- /dev/null
+++ b/.mvn/wrapper/maven-wrapper.properties
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+wrapperVersion=3.3.2
+distributionType=only-script
+distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.8/apache-maven-3.9.8-bin.zip
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9f8745c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,233 @@
+# TensorFlow Serving Client for Java
+
+[![Release](https://jitpack.io/v/tadayosi/tensorflow-serving-client-java.svg)](<https://jitpack.io/#tadayosi/tensorflow-serving-client-java>)
+[![Test](https://github.com/tadayosi/tensorflow-serving-client-java/actions/workflows/test.yml/badge.svg)](https://github.com/tadayosi/tensorflow-serving-client-java/actions/workflows/test.yml)
+
+TensorFlow Serving Client for Java (TFSC4J) is a Java client library for [TensorFlow Serving](https://github.com/tensorflow/serving). It supports the following [TensorFlow Serving REST API](https://www.tensorflow.org/tfx/serving/api_rest):
+
+- [Model status API](https://www.tensorflow.org/tfx/serving/api_rest#model_status_api)
+- [Model Metadata API](https://www.tensorflow.org/tfx/serving/api_rest#model_metadata_api)
+- [Classify and Regress API](https://www.tensorflow.org/tfx/serving/api_rest#classify_and_regress_api)
+- [Predict API](https://www.tensorflow.org/tfx/serving/api_rest#predict_api)
+
+## Requirements
+
+- Java 17+
+
+## Install
+
+1. Add the [JitPack](https://jitpack.io) repository to your `pom.xml`:
+
+    ```xml
+    <repositories>
+        <repository>
+            <id>jitpack.io</id>
+            <url>https://jitpack.io</url>
+        </repository>
+    </repositories>
+    ```
+
+2. Add the dependency:
+
+    ```xml
+    <dependency>
+        <groupId>com.github.tadayosi</groupId>
+        <artifactId>tensorflow-serving-client-java</artifactId>
+        <version>v0.3</version>
+    </dependency>
+    ```
+
+## Usage
+
+### Inference
+
+- Prediction:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  byte[] image = Files.readAllBytes(Path.of("0.png"));
+  Object result = client.inference().predictions("mnist_v2", image);
+  System.out.println(result);
+  // => 0
+  ```
+
+- With the inference API endpoint other than <http://localhost:8080>:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.builder()
+      .inferenceAddress("http://localhost:12345")
+      .build();
+  ```
+
+- With token authorization:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.builder()
+      .inferenceKey("<inference-key>")
+      .build();
+  ```
+
+### Management
+
+- Register a model:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  Response response = client.management().registerModel(
+    "https://torchserve.pytorch.org/mar_files/mnist_v2.mar",
+    RegisterModelOptions.empty());
+  System.out.println(response.getStatus());
+  // => "Model "mnist_v2" Version: 2.0 registered with 0 initial workers. Use scale workers API to add workers for the model."
+  ```
+
+- Scale workers for a model:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  Response response = client.management().setAutoScale(
+    "mnist_v2",
+    SetAutoScaleOptions.builder()
+      .minWorker(1)
+      .maxWorker(2)
+      .build());
+  System.out.println(response.getStatus());
+  // => "Processing worker updates..."
+  ```
+
+- Describe a model:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  List<ModelDetail> model = client.management().describeModel("mnist_v2");
+  System.out.println(model.get(0));
+  // =>
+  // ModelDetail {
+  //     modelName: mnist_v2
+  //     modelVersion: 2.0
+  // ...
+  ```
+
+- Unregister a model:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  Response response = client.management().unregisterModel(
+    "mnist_v2",
+    UnregisterModelOptions.empty());
+  System.out.println(response.getStatus());
+  // => "Model "mnist_v2" unregistered"
+  ```
+
+- List models:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  ModelList models = client.management().listModels(10, null);
+  System.out.println(models);
+  // =>
+  // ModelList {
+  //     nextPageToken: null
+  //     models: [Model {
+  //     modelName: mnist_v2
+  //     modelUrl: https://torchserve.pytorch.org/mar_files/mnist_v2.mar
+  // },
+  // ...
+  ```
+
+- Set default version for a model:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  Response response = client.management().setDefault("mnist_v2", "2.0");
+  System.out.println(response.getStatus());
+  // => "Default version successfully updated for model "mnist_v2" to "2.0""
+  ```
+
+- With the management API endpoint other than <http://localhost:8081>:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.builder()
+      .managementAddress("http://localhost:12345")
+      .build();
+  ```
+
+- With token authorization:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.builder()
+      .managementKey("<management-key>")
+      .build();
+  ```
+
+### Metrics
+
+- Get metrics in Prometheus format:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.newInstance();
+
+  String metrics = client.metrics().metrics();
+  System.out.println(metrics);
+  // =>
+  // # HELP MemoryUsed Torchserve prometheus gauge metric with unit: Megabytes
+  // # TYPE MemoryUsed gauge
+  // MemoryUsed{Level="Host",Hostname="3a9b51d41fbf",} 2075.09765625
+  // ...
+  ```
+
+- With the metrics API endpoint other than <http://localhost:8082>:
+
+  ```java
+  TensorFlowServingClient client = TensorFlowServingClient.builder()
+      .metricsAddress("http://localhost:12345")
+      .build();
+  ```
+
+## Configuration
+
+### tfsc4j.properties
+
+```properties
+inference.key = <inference-key>
+inference.address = http://localhost:8080
+# inference.address takes precedence over inference.port if it's defined
+inference.port = 8080
+
+management.key = <management-key>
+management.address = http://localhost:8081
+# management.address takes precedence over management.port if it's defined
+management.port = 8081
+
+metrics.address = http://localhost:8082
+# metrics.address takes precedence over metrics.port if it's defined
+metrics.port = 8082
+```
+
+### System properties
+
+You can configure the TFSC4J properties via system properties with prefix `tfsc4j.`.
+
+For instance, you can configure `inference.address` with the `tfsc4j.inference.address` system property.
+
+### Environment variables
+
+You can also configure the TFSC4J properties via environment variables with prefix `TFSC4J_`.
+
+For instance, you can configure `inference.address` with the `TFSC4J_INFERENCE_ADDRESS` environment variable.
+
+## Examples
+
+See [examples](./examples/).
+
+## Build
+
+```console
+mvn clean install
+```
diff --git a/examples/describe_model.java b/examples/describe_model.java
new file mode 100755
index 0000000..ffc4cb1
--- /dev/null
+++ b/examples/describe_model.java
@@ -0,0 +1,24 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+
+public class describe_model {
+
+    // Name of the model to describe; registered beforehand via register_mnist.java.
+    private static final String MNIST_MODEL = "mnist_v2";
+
+    public static void main(String... args) throws Exception {
+        try {
+            var client = TorchServeClient.newInstance();
+            // describeModel returns one entry per model version; print the first.
+            var models = client.management().describeModel(MNIST_MODEL);
+            System.out.println("Model> " + models.get(0));
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/list_models.java b/examples/list_models.java
new file mode 100755
index 0000000..be3f06f
--- /dev/null
+++ b/examples/list_models.java
@@ -0,0 +1,22 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+
+public class list_models {
+
+    public static void main(String... args) throws Exception {
+        try {
+            // List up to 10 registered models, starting from the first page (no pagination token).
+            var response = TorchServeClient.newInstance()
+                    .management()
+                    .listModels(10, null);
+            System.out.println("Models> " + response);
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/metrics.java b/examples/metrics.java
new file mode 100755
index 0000000..e938d9f
--- /dev/null
+++ b/examples/metrics.java
@@ -0,0 +1,22 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+
+public class metrics {
+
+    public static void main(String... args) throws Exception {
+        try {
+            // Fetch the Prometheus-format metrics from the metrics API endpoint and print them.
+            var client = TorchServeClient.newInstance();
+            System.out.println(client.metrics().metrics());
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/mnist.java b/examples/mnist.java
new file mode 100755
index 0000000..de8b40d
--- /dev/null
+++ b/examples/mnist.java
@@ -0,0 +1,31 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+
+public class mnist {
+
+    // Name of the model to query; must already be registered (see register_mnist.java).
+    private static final String MNIST_MODEL = "mnist_v2";
+
+    public static void main(String... args) throws Exception {
+        try {
+            var client = TorchServeClient.newInstance();
+            // Send each sample digit image to the inference API and print the prediction.
+            for (var image : new String[] { "0.png", "1.png" }) {
+                var data = Files.readAllBytes(Path.of("src/test/resources/data/" + image));
+                var result = client.inference().predictions(MNIST_MODEL, data);
+                System.out.println("Answer> " + result);
+            }
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/register_mnist.java b/examples/register_mnist.java
new file mode 100755
index 0000000..3d6e11c
--- /dev/null
+++ b/examples/register_mnist.java
@@ -0,0 +1,32 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+import com.github.tadayosi.torchserve.client.model.RegisterModelOptions;
+import com.github.tadayosi.torchserve.client.model.SetAutoScaleOptions;
+
+public class register_mnist {
+
+    // Prebuilt MNIST model archive hosted by the TorchServe project.
+    private static final String MNIST_URL = "https://torchserve.pytorch.org/mar_files/mnist_v2.mar";
+    private static final String MNIST_MODEL = "mnist_v2";
+
+    public static void main(String... args) throws Exception {
+        try {
+            var client = TorchServeClient.newInstance();
+            // Register the model, then scale it to a single worker so it can serve requests.
+            var response = client.management().registerModel(MNIST_URL, RegisterModelOptions.empty());
+            System.out.println("registerModel> " + response.getStatus());
+            response = client.management().setAutoScale(MNIST_MODEL, SetAutoScaleOptions.builder()
+                    .minWorker(1)
+                    .maxWorker(1)
+                    .build());
+            System.out.println("setAutoScale> " + response.getStatus());
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/set_default.java b/examples/set_default.java
new file mode 100755
index 0000000..fdb4067
--- /dev/null
+++ b/examples/set_default.java
@@ -0,0 +1,24 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+
+public class set_default {
+
+    private static final String MNIST_MODEL = "mnist_v2";
+
+    public static void main(String... args) throws Exception {
+        try {
+            var client = TorchServeClient.newInstance();
+            // Make version 2.0 the default version served for this model.
+            var response = client.management().setDefault(MNIST_MODEL, "2.0");
+            System.out.println("Status> " + response.getStatus());
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/token_auth.java b/examples/token_auth.java
new file mode 100755
index 0000000..5d7aefd
--- /dev/null
+++ b/examples/token_auth.java
@@ -0,0 +1,30 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+
+public class token_auth {
+
+    /**
+     * Auth token for the inference API. You can find it by looking at the
+     * `key_file.json` in the current directory of the TorchServe server.
+     */
+    private static final String INFERENCE_AUTH_TOKEN = "<inference-key>";
+
+    public static void main(String... args) throws Exception {
+        try {
+            // Build a client that sends the token with every inference API request.
+            var client = TorchServeClient.builder()
+                    .inferenceKey(INFERENCE_AUTH_TOKEN)
+                    .build();
+            var response = client.inference().ping();
+            System.out.println(response);
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/examples/unregister_mnist.java b/examples/unregister_mnist.java
new file mode 100755
index 0000000..383efe9
--- /dev/null
+++ b/examples/unregister_mnist.java
@@ -0,0 +1,25 @@
+///usr/bin/env jbang "$0" "$@" ; exit $?
+
+//JAVA 17+
+//REPOS mavencentral,jitpack=https://jitpack.io
+//DEPS com.github.tadayosi:torchserve-client-java:main-SNAPSHOT
+//DEPS org.slf4j:slf4j-simple:1.7.36
+
+import com.github.tadayosi.torchserve.client.TorchServeClient;
+import com.github.tadayosi.torchserve.client.model.ApiException;
+import com.github.tadayosi.torchserve.client.model.UnregisterModelOptions;
+
+public class unregister_mnist {
+
+    private static final String MNIST_MODEL = "mnist_v2";
+
+    public static void main(String... args) throws Exception {
+        try {
+            var client = TorchServeClient.newInstance();
+            // Remove the model from the server, using the default unregister options.
+            var response = client.management().unregisterModel(MNIST_MODEL, UnregisterModelOptions.empty());
+            System.out.println("Status> " + response.getStatus());
+        } catch (ApiException e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/mvnw b/mvnw
new file mode 100755
index 0000000..19529dd
--- /dev/null
+++ b/mvnw
@@ -0,0 +1,259 @@
+#!/bin/sh
+# ----------------------------------------------------------------------------
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+# ----------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------
+# Apache Maven Wrapper startup batch script, version 3.3.2
+#
+# Optional ENV vars
+# -----------------
+#   JAVA_HOME - location of a JDK home dir, required when download maven via java source
+#   MVNW_REPOURL - repo url base for downloading maven distribution
+#   MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven
+#   MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output
+# ----------------------------------------------------------------------------
+
+set -euf
+[ "${MVNW_VERBOSE-}" != debug ] || set -x
+
+# OS specific support.
+native_path() { printf %s\\n "$1"; }
+case "$(uname)" in
+CYGWIN* | MINGW*)
+  [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")"
+  native_path() { cygpath --path --windows "$1"; }
+  ;;
+esac
+
+# set JAVACMD and JAVACCMD
+set_java_home() {
+  # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched
+  if [ -n "${JAVA_HOME-}" ]; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ]; then
+      # IBM's JDK on AIX uses strange locations for the executables
+      JAVACMD="$JAVA_HOME/jre/sh/java"
+      JAVACCMD="$JAVA_HOME/jre/sh/javac"
+    else
+      JAVACMD="$JAVA_HOME/bin/java"
+      JAVACCMD="$JAVA_HOME/bin/javac"
+
+      if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then
+        echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." >&2
+        echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2
+        return 1
+      fi
+    fi
+  else
+    JAVACMD="$(
+      'set' +e
+      'unset' -f command 2>/dev/null
+      'command' -v java
+    )" || :
+    JAVACCMD="$(
+      'set' +e
+      'unset' -f command 2>/dev/null
+      'command' -v javac
+    )" || :
+
+    if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then
+      echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2
+      return 1
+    fi
+  fi
+}
+
+# hash string like Java String::hashCode
+hash_string() {
+  str="${1:-}" h=0
+  while [ -n "$str" ]; do
+    char="${str%"${str#?}"}"
+    h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296))
+    str="${str#?}"
+  done
+  printf %x\\n $h
+}
+
+verbose() { :; }
+[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; }
+
+die() {
+  printf %s\\n "$1" >&2
+  exit 1
+}
+
+trim() {
+  # MWRAPPER-139:
+  #   Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds.
+  #   Needed for removing poorly interpreted newline sequences when running in more
+  #   exotic environments such as mingw bash on Windows.
+  printf "%s" "${1}" | tr -d '[:space:]'
+}
+
+# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties
+while IFS="=" read -r key value; do
+  case "${key-}" in
+  distributionUrl) distributionUrl=$(trim "${value-}") ;;
+  distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;;
+  esac
+done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties"
+[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties"
+
+case "${distributionUrl##*/}" in
+maven-mvnd-*bin.*)
+  MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/
+  case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in
+  *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;;
+  :Darwin*x86_64) distributionPlatform=darwin-amd64 ;;
+  :Darwin*arm64) distributionPlatform=darwin-aarch64 ;;
+  :Linux*x86_64*) distributionPlatform=linux-amd64 ;;
+  *)
+    echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2
+    distributionPlatform=linux-amd64
+    ;;
+  esac
+  distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip"
+  ;;
+maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;;
+*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;;
+esac
+
+# apply MVNW_REPOURL and calculate MAVEN_HOME
+# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-<version>,maven-mvnd-<version>-<platform>}/<hash>
+[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}"
+distributionUrlName="${distributionUrl##*/}"
+distributionUrlNameMain="${distributionUrlName%.*}"
+distributionUrlNameMain="${distributionUrlNameMain%-bin}"
+MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}"
+MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")"
+
+exec_maven() {
+  unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || :
+  exec "$MAVEN_HOME/bin/$MVN_CMD" "$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD"
+}
+
+if [ -d "$MAVEN_HOME" ]; then
+  verbose "found existing MAVEN_HOME at $MAVEN_HOME"
+  exec_maven "$@"
+fi
+
+case "${distributionUrl-}" in
+*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;;
+*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;;
+esac
+
+# prepare tmp dir
+if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then
+  clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; }
+  trap clean HUP INT TERM EXIT
+else
+  die "cannot create temp dir"
+fi
+
+mkdir -p -- "${MAVEN_HOME%/*}"
+
+# Download and Install Apache Maven
+verbose "Couldn't find MAVEN_HOME, downloading and installing it ..."
+verbose "Downloading from: $distributionUrl"
+verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName"
+
+# select .zip or .tar.gz
+if ! command -v unzip >/dev/null; then
+  distributionUrl="${distributionUrl%.zip}.tar.gz"
+  distributionUrlName="${distributionUrl##*/}"
+fi
+
+# verbose opt
+__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR=''
+[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v
+
+# normalize http auth
+case "${MVNW_PASSWORD:+has-password}" in
+'') MVNW_USERNAME='' MVNW_PASSWORD='' ;;
+has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;;
+esac
+
+if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then
+  verbose "Found wget ... using wget"
+  wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl"
+elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then
+  verbose "Found curl ... using curl"
+  curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl"
+elif set_java_home; then
+  verbose "Falling back to use Java to download"
+  javaSource="$TMP_DOWNLOAD_DIR/Downloader.java"
+  targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName"
+  cat >"$javaSource" <<-END
+	public class Downloader extends java.net.Authenticator
+	{
+	  protected java.net.PasswordAuthentication getPasswordAuthentication()
+	  {
+	    return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() );
+	  }
+	  public static void main( String[] args ) throws Exception
+	  {
+	    setDefault( new Downloader() );
+	    java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() );
+	  }
+	}
+	END
+  # For Cygwin/MinGW, switch paths to Windows format before running javac and java
+  verbose " - Compiling Downloader.java ..."
+  "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java"
+  verbose " - Running Downloader.java ..."
+  "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")"
+fi
+
+# If specified, validate the SHA-256 sum of the Maven distribution zip file
+if [ -n "${distributionSha256Sum-}" ]; then
+  distributionSha256Result=false
+  if [ "$MVN_CMD" = mvnd.sh ]; then
+    echo "Checksum validation is not supported for maven-mvnd." >&2
+    echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2
+    exit 1
+  elif command -v sha256sum >/dev/null; then
+    if echo "$distributionSha256Sum  $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then
+      distributionSha256Result=true
+    fi
+  elif command -v shasum >/dev/null; then
+    if echo "$distributionSha256Sum  $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then
+      distributionSha256Result=true
+    fi
+  else
+    echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2
+    echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2
+    exit 1
+  fi
+  if [ $distributionSha256Result = false ]; then
+    echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2
+    echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2
+    exit 1
+  fi
+fi
+
+# unzip and move
+if command -v unzip >/dev/null; then
+  unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip"
+else
+  tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar"
+fi
+printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url"
+mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME"
+
+clean || :
+exec_maven "$@"
diff --git a/mvnw.cmd b/mvnw.cmd
new file mode 100644
index 0000000..249bdf3
--- /dev/null
+++ b/mvnw.cmd
@@ -0,0 +1,149 @@
+<# : batch portion
+@REM ----------------------------------------------------------------------------
+@REM Licensed to the Apache Software Foundation (ASF) under one
+@REM or more contributor license agreements.  See the NOTICE file
+@REM distributed with this work for additional information
+@REM regarding copyright ownership.  The ASF licenses this file
+@REM to you under the Apache License, Version 2.0 (the
+@REM "License"); you may not use this file except in compliance
+@REM with the License.  You may obtain a copy of the License at
+@REM
+@REM    http://www.apache.org/licenses/LICENSE-2.0
+@REM
+@REM Unless required by applicable law or agreed to in writing,
+@REM software distributed under the License is distributed on an
+@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+@REM KIND, either express or implied.  See the License for the
+@REM specific language governing permissions and limitations
+@REM under the License.
+@REM ----------------------------------------------------------------------------
+
+@REM ----------------------------------------------------------------------------
+@REM Apache Maven Wrapper startup batch script, version 3.3.2
+@REM
+@REM Optional ENV vars
+@REM   MVNW_REPOURL - repo url base for downloading maven distribution
+@REM   MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven
+@REM   MVNW_VERBOSE - true: enable verbose log; others: silence the output
+@REM ----------------------------------------------------------------------------
+
+@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0)
+@SET __MVNW_CMD__=
+@SET __MVNW_ERROR__=
+@SET __MVNW_PSMODULEP_SAVE=%PSModulePath%
+@SET PSModulePath=
+@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @(
+  IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B)
+)
+@SET PSModulePath=%__MVNW_PSMODULEP_SAVE%
+@SET __MVNW_PSMODULEP_SAVE=
+@SET __MVNW_ARG0_NAME__=
+@SET MVNW_USERNAME=
+@SET MVNW_PASSWORD=
+@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*)
+@echo Cannot start maven from wrapper >&2 && exit /b 1
+@GOTO :EOF
+: end batch / begin powershell #>
+
+$ErrorActionPreference = "Stop"
+if ($env:MVNW_VERBOSE -eq "true") {
+  $VerbosePreference = "Continue"
+}
+
+# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties
+$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl
+if (!$distributionUrl) {
+  Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties"
+}
+
+switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) {
+  "maven-mvnd-*" {
+    $USE_MVND = $true
+    $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip"
+    $MVN_CMD = "mvnd.cmd"
+    break
+  }
+  default {
+    $USE_MVND = $false
+    $MVN_CMD = $script -replace '^mvnw','mvn'
+    break
+  }
+}
+
+# apply MVNW_REPOURL and calculate MAVEN_HOME
+# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-<version>,maven-mvnd-<version>-<platform>}/<hash>
+if ($env:MVNW_REPOURL) {
+  $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" }
+  $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')"
+}
+$distributionUrlName = $distributionUrl -replace '^.*/',''
+$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$',''
+$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain"
+if ($env:MAVEN_USER_HOME) {
+  $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain"
+}
+$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join ''
+$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME"
+
+if (Test-Path -Path "$MAVEN_HOME" -PathType Container) {
+  Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME"
+  Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD"
+  exit $?
+}
+
+if (! $distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) {
+  Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl"
+}
+
+# prepare tmp dir
+$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile
+$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir"
+$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null
+trap {
+  if ($TMP_DOWNLOAD_DIR.Exists) {
+    try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null }
+    catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" }
+  }
+}
+
+New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null
+
+# Download and Install Apache Maven
+Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..."
+Write-Verbose "Downloading from: $distributionUrl"
+Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName"
+
+$webclient = New-Object System.Net.WebClient
+if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) {
+  $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD)
+}
+[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
+$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null
+
+# If specified, validate the SHA-256 sum of the Maven distribution zip file
+$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum
+if ($distributionSha256Sum) {
+  if ($USE_MVND) {
+    Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties."
+  }
+  Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash
+  if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) {
+    Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property."
+  }
+}
+
+# unzip and move
+Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null
+Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null
+try {
+  Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null
+} catch {
+  if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) {
+    Write-Error "fail to move MAVEN_HOME"
+  }
+} finally {
+  try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null }
+  catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" }
+}
+
+Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD"
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..d08fbb8
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,166 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.github.tadayosi.tensorflow</groupId>
+    <artifactId>tensorflow-serving-client</artifactId>
+    <version>0.1-SNAPSHOT</version>
+
+    <name>TensorFlow Serving Client for Java</name>
+    <description>A Java client library for TensorFlow Serving</description>
+    <url>https://github.com/tadayosi/tensorflow-serving-client-java</url>
+
+    <scm>
+        <connection>scm:git:git@github.com:tadayosi/tensorflow-serving-client-java.git</connection>
+        <developerConnection>scm:git:git@github.com:tadayosi/tensorflow-serving-client-java.git</developerConnection>
+        <url>git@github.com:tadayosi/tensorflow-serving-client-java.git</url>
+        <tag>HEAD</tag>
+    </scm>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <maven.compiler.source>17</maven.compiler.source>
+        <maven.compiler.target>17</maven.compiler.target>
+        <maven.compiler.release>17</maven.compiler.release>
+
+        <annotations-api-version>6.0.53</annotations-api-version>
+        <grpc-version>1.68.1</grpc-version>
+        <junit-jupiter-version>5.11.1</junit-jupiter-version>
+        <log4j2-version>2.24.0</log4j2-version>
+        <protobuf-version>4.29.0-RC3</protobuf-version>
+        <slf4j-version>1.7.36</slf4j-version>
+        <testcontainers-version>1.20.2</testcontainers-version>
+
+        <build-helper-maven-plugin-version>3.6.0</build-helper-maven-plugin-version>
+        <maven-failsafe-plugin-version>3.5.0</maven-failsafe-plugin-version>
+        <maven-release-plugin-version>3.1.1</maven-release-plugin-version>
+        <maven-surefire-plugin-version>3.5.0</maven-surefire-plugin-version>
+        <protobuf-maven-plugin-version>2.7.0</protobuf-maven-plugin-version>
+    </properties>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.junit</groupId>
+                <artifactId>junit-bom</artifactId>
+                <version>${junit-jupiter-version}</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+            <dependency>
+                <groupId>org.testcontainers</groupId>
+                <artifactId>testcontainers-bom</artifactId>
+                <version>${testcontainers-version}</version>
+                <type>pom</type>
+                <scope>import</scope>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+            <version>${protobuf-version}</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty-shaded</artifactId>
+            <version>${grpc-version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <version>${grpc-version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <version>${grpc-version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.tomcat</groupId>
+            <artifactId>annotations-api</artifactId>
+            <version>${annotations-api-version}</version>
+        </dependency>
+
+        <!-- Logging -->
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>${slf4j-version}</version>
+        </dependency>
+
+        <!-- Testing -->
+        <dependency>
+            <groupId>org.junit.jupiter</groupId>
+            <artifactId>junit-jupiter</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>testcontainers</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.testcontainers</groupId>
+            <artifactId>junit-jupiter</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <version>${log4j2-version}</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>io.github.ascopes</groupId>
+                <artifactId>protobuf-maven-plugin</artifactId>
+                <version>${protobuf-maven-plugin-version}</version>
+                <configuration>
+                    <protocVersion>${protobuf-version}</protocVersion>
+                    <sourceDirectories>
+                        <sourceDirectory>src/main/proto</sourceDirectory>
+                    </sourceDirectories>
+                    <binaryMavenPlugins>
+                        <binaryMavenPlugin>
+                            <groupId>io.grpc</groupId>
+                            <artifactId>protoc-gen-grpc-java</artifactId>
+                            <version>${grpc-version}</version>
+                        </binaryMavenPlugin>
+                    </binaryMavenPlugins>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>generate</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>${maven-surefire-plugin-version}</version>
+            </plugin>
+            <plugin>
+                <artifactId>maven-failsafe-plugin</artifactId>
+                <version>${maven-failsafe-plugin-version}</version>
+            </plugin>
+
+            <!-- Releasing -->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-release-plugin</artifactId>
+                <version>${maven-release-plugin-version}</version>
+                <configuration>
+                    <tagNameFormat>v@{project.version}</tagNameFormat>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/src/main/java/com/github/tadayosi/tensorflow/serving/client/Configuration.java b/src/main/java/com/github/tadayosi/tensorflow/serving/client/Configuration.java
new file mode 100644
index 0000000..811b63a
--- /dev/null
+++ b/src/main/java/com/github/tadayosi/tensorflow/serving/client/Configuration.java
@@ -0,0 +1,79 @@
+package com.github.tadayosi.tensorflow.serving.client;
+
+import java.io.InputStream;
+import java.util.Locale;
+import java.util.Optional;
+import java.util.Properties;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Client configuration resolved from (in order of precedence) system properties,
+ * environment variables and the optional {@code tfsc4j.properties} classpath resource.
+ */
+public class Configuration {
+
+    private static final Logger LOG = LoggerFactory.getLogger(Configuration.class);
+
+    public static final String TFSC4J_PROPERTIES = "tfsc4j.properties";
+    public static final String TFSC4J_PREFIX = "tfsc4j.";
+
+    public static final String TARGET = "target";
+    public static final String CREDENTIALS = "credentials";
+
+    private final Optional<String> target;
+    private final Optional<String> credentials;
+
+    private Configuration() {
+        Properties props = loadProperties();
+
+        this.target = loadProperty(TARGET, props);
+        this.credentials = loadProperty(CREDENTIALS, props);
+    }
+
+    /**
+     * Loads {@value #TFSC4J_PROPERTIES} from the classpath.
+     * Returns empty properties when the resource is missing or unreadable.
+     */
+    static Properties loadProperties() {
+        Properties properties = new Properties();
+        // try-with-resources so the stream is always closed; a missing resource
+        // yields a null stream, which we skip instead of letting load() throw NPE.
+        try (InputStream is = Configuration.class.getClassLoader().getResourceAsStream(TFSC4J_PROPERTIES)) {
+            if (is != null) {
+                properties.load(is);
+            }
+        } catch (Exception e) {
+            // The properties file is optional - ignore, but log for diagnosis
+            LOG.debug("Failed to load properties file: {}", e.getMessage());
+        }
+        return properties;
+    }
+
+    /**
+     * Order of precedence: System properties > environment variables > properties file
+     */
+    static Optional<String> loadProperty(String key, Properties properties) {
+        String tsc4jKey = TFSC4J_PREFIX + key;
+        // Locale.ROOT keeps the env var name stable regardless of the default locale
+        // (e.g. under a Turkish locale "i" would upper-case to a dotted capital I).
+        Optional<String> value = Optional.ofNullable(System.getProperty(tsc4jKey))
+            .or(() -> Optional.ofNullable(System.getenv(tsc4jKey.toUpperCase(Locale.ROOT).replace(".", "_"))))
+            .or(() -> Optional.ofNullable(properties.getProperty(key)));
+        LOG.debug("Loaded property {}: {}", key, value.orElse(null));
+        return value;
+    }
+
+    public static Configuration load() {
+        return new Configuration();
+    }
+
+    public Optional<String> getTarget() {
+        return target;
+    }
+
+    public Optional<String> getCredentials() {
+        return credentials;
+    }
+}
diff --git a/src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingApi.java b/src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingApi.java
new file mode 100644
index 0000000..81f32f5
--- /dev/null
+++ b/src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingApi.java
@@ -0,0 +1,29 @@
+package com.github.tadayosi.tensorflow.serving.client;
+
+import tensorflow.serving.Classification;
+import tensorflow.serving.GetModelMetadata;
+import tensorflow.serving.GetModelStatus;
+import tensorflow.serving.Predict;
+import tensorflow.serving.RegressionOuterClass;
+
+/**
+ * The TensorFlow Serving API surface exposed by this library, mirroring the
+ * ModelService (model status) and PredictionService (metadata/classify/regress/predict) RPCs.
+ */
+public interface TensorFlowServingApi {
+
+    /** Returns the status of a served model. */
+    GetModelStatus.GetModelStatusResponse getModelStatus(GetModelStatus.GetModelStatusRequest request);
+
+    /** Returns the metadata of a served model. */
+    GetModelMetadata.GetModelMetadataResponse getModelMetadata(GetModelMetadata.GetModelMetadataRequest request);
+
+    /** Runs a classification request against a served model. */
+    Classification.ClassificationResponse classify(Classification.ClassificationRequest request);
+
+    /** Runs a regression request against a served model. */
+    RegressionOuterClass.RegressionResponse regress(RegressionOuterClass.RegressionRequest request);
+
+    /** Runs a prediction request against a served model. */
+    Predict.PredictResponse predict(Predict.PredictRequest request);
+}
diff --git a/src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClient.java b/src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClient.java
new file mode 100644
index 0000000..35a3fcc
--- /dev/null
+++ b/src/main/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClient.java
@@ -0,0 +1,95 @@
+package com.github.tadayosi.tensorflow.serving.client;
+
+import java.util.Optional;
+
+import io.grpc.ChannelCredentials;
+import io.grpc.Grpc;
+import io.grpc.InsecureChannelCredentials;
+import io.grpc.ManagedChannel;
+import tensorflow.serving.Classification;
+import tensorflow.serving.GetModelMetadata;
+import tensorflow.serving.GetModelStatus;
+import tensorflow.serving.ModelServiceGrpc;
+import tensorflow.serving.Predict;
+import tensorflow.serving.PredictionServiceGrpc;
+import tensorflow.serving.RegressionOuterClass;
+
+/**
+ * A gRPC client for TensorFlow Serving, delegating to blocking stubs of
+ * ModelService and PredictionService over a shared channel.
+ */
+public class TensorFlowServingClient implements TensorFlowServingApi {
+
+    // TensorFlow Serving listens for gRPC on port 8500 by default (8501 is the REST port)
+    private static final String DEFAULT_TARGET = "localhost:8500";
+
+    private final ModelServiceGrpc.ModelServiceBlockingStub modelService;
+    private final PredictionServiceGrpc.PredictionServiceBlockingStub predictionService;
+
+    private TensorFlowServingClient(String target, ChannelCredentials credentials) {
+        ManagedChannel channel = Grpc.newChannelBuilder(target, credentials).build();
+        this.modelService = ModelServiceGrpc.newBlockingStub(channel);
+        this.predictionService = PredictionServiceGrpc.newBlockingStub(channel);
+    }
+
+    /** Creates a client using defaults resolved by {@link Configuration}. */
+    public static TensorFlowServingClient newInstance() {
+        return builder().build();
+    }
+
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    @Override
+    public GetModelStatus.GetModelStatusResponse getModelStatus(GetModelStatus.GetModelStatusRequest request) {
+        return modelService.getModelStatus(request);
+    }
+
+    @Override
+    public GetModelMetadata.GetModelMetadataResponse getModelMetadata(GetModelMetadata.GetModelMetadataRequest request) {
+        return predictionService.getModelMetadata(request);
+    }
+
+    @Override
+    public Classification.ClassificationResponse classify(Classification.ClassificationRequest request) {
+        return predictionService.classify(request);
+    }
+
+    @Override
+    public RegressionOuterClass.RegressionResponse regress(RegressionOuterClass.RegressionRequest request) {
+        return predictionService.regress(request);
+    }
+
+    @Override
+    public Predict.PredictResponse predict(Predict.PredictRequest request) {
+        return predictionService.predict(request);
+    }
+
+    /** Fluent builder; initial values come from {@link Configuration}. */
+    public static class Builder {
+
+        private final Configuration configuration = Configuration.load();
+
+        private Optional<String> target = configuration.getTarget();
+        private Optional<String> credentials = configuration.getCredentials();
+
+        public Builder target(String target) {
+            this.target = Optional.of(target);
+            return this;
+        }
+
+        public Builder credentials(String credentials) {
+            this.credentials = Optional.of(credentials);
+            return this;
+        }
+
+        public TensorFlowServingClient build() {
+            // TODO(review): 'credentials' is collected from config/builder but never
+            // applied to the channel or stubs - the channel is always insecure (wip).
+            return new TensorFlowServingClient(
+                target.orElse(DEFAULT_TARGET),
+                InsecureChannelCredentials.create());
+        }
+    }
+}
diff --git a/src/main/proto/google/protobuf/any.proto b/src/main/proto/google/protobuf/any.proto
new file mode 100644
index 0000000..eff44e5
--- /dev/null
+++ b/src/main/proto/google/protobuf/any.proto
@@ -0,0 +1,162 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option go_package = "google.golang.org/protobuf/types/known/anypb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//     // or ...
+//     if (any.isSameTypeAs(Foo.getDefaultInstance())) {
+//       foo = any.unpack(Foo.getDefaultInstance());
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := anypb.New(foo)
+//      if err != nil {
+//        ...
+//      }
+//      ...
+//      foo := &pb.Foo{}
+//      if err := any.UnmarshalTo(foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name that uniquely identifies the type of the serialized
+  // protocol buffer message. This string must contain at least
+  // one "/" character. The last segment of the URL's path must represent
+  // the fully qualified name of the type (as in
+  // `path/google.protobuf.Duration`). The name should be in a canonical form
+  // (e.g., leading "." is not accepted).
+  //
+  // In practice, teams usually precompile into the binary all types that they
+  // expect it to use in the context of Any. However, for URLs which use the
+  // scheme `http`, `https`, or no scheme, one can optionally set up a type
+  // server that maps type URLs to message definitions as follows:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Note: this functionality is not currently available in the official
+  // protobuf release, and it is not used for type URLs beginning with
+  // type.googleapis.com. As of May 2023, there are no widely used type server
+  // implementations and no plans to implement one.
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/src/main/proto/google/protobuf/wrappers.proto b/src/main/proto/google/protobuf/wrappers.proto
new file mode 100644
index 0000000..1959fa5
--- /dev/null
+++ b/src/main/proto/google/protobuf/wrappers.proto
@@ -0,0 +1,123 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+//
+// These wrappers have no meaningful use within repeated fields as they lack
+// the ability to detect presence on individual elements.
+// These wrappers have no meaningful use within a map or a oneof since
+// individual entries of a map or fields of a oneof can already detect presence.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/protobuf/types/known/wrapperspb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+  // The double value.
+  double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+  // The float value.
+  float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+  // The int64 value.
+  int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+  // The uint64 value.
+  uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
diff --git a/src/main/proto/tensorflow/core/example/example.proto b/src/main/proto/tensorflow/core/example/example.proto
new file mode 100644
index 0000000..9f762fb
--- /dev/null
+++ b/src/main/proto/tensorflow/core/example/example.proto
@@ -0,0 +1,301 @@
+// Protocol messages for describing input data Examples for machine learning
+// model training or inference.
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/example/feature.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "ExampleProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.example";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto";
+
+// An Example is a mostly-normalized data format for storing data for
+// training and inference.  It contains a key-value store (features); where
+// each key (string) maps to a Feature message (which is oneof packed BytesList,
+// FloatList, or Int64List).  This flexible and compact format allows the
+// storage of large amounts of typed data, but requires that the data shape
+// and use be determined by the configuration files and parsers that are used to
+// read and write this format.  That is, the Example is mostly *not* a
+// self-describing format.  In TensorFlow, Examples are read in row-major
+// format, so any configuration that describes data with rank-2 or above
+// should keep this in mind. If you flatten a matrix into a FloatList it should
+// be stored as [ row 0 ... row 1 ... row M-1 ]
+//
+// An Example for a movie recommendation application:
+//   features {
+//     feature {
+//       key: "age"
+//       value { float_list {
+//         value: 29.0
+//       }}
+//     }
+//     feature {
+//       key: "movie"
+//       value { bytes_list {
+//         value: "The Shawshank Redemption"
+//         value: "Fight Club"
+//       }}
+//     }
+//     feature {
+//       key: "movie_ratings"
+//       value { float_list {
+//         value: 9.0
+//         value: 9.7
+//       }}
+//     }
+//     feature {
+//       key: "suggestion"
+//       value { bytes_list {
+//         value: "Inception"
+//       }}
+//     }
+//     # Note that this feature exists to be used as a label in training.
+//     # E.g., if training a logistic regression model to predict purchase
+//     # probability in our learning tool we would set the label feature to
+//     # "suggestion_purchased".
+//     feature {
+//       key: "suggestion_purchased"
+//       value { float_list {
+//         value: 1.0
+//       }}
+//     }
+//     # Similar to "suggestion_purchased" above this feature exists to be used
+//     # as a label in training.
+//     # E.g., if training a linear regression model to predict purchase
+//     # price in our learning tool we would set the label feature to
+//     # "purchase_price".
+//     feature {
+//       key: "purchase_price"
+//       value { float_list {
+//         value: 9.99
+//       }}
+//     }
+//  }
+//
+// A conformant Example data set obeys the following conventions:
+//   - If a Feature K exists in one example with data type T, it must be of
+//       type T in all other examples when present. It may be omitted.
+//   - The number of instances of Feature K list data may vary across examples,
+//       depending on the requirements of the model.
+//   - If a Feature K doesn't exist in an example, a K-specific default will be
+//       used, if configured.
+//   - If a Feature K exists in an example but contains no items, the intent
+//       is considered to be an empty tensor and no default will be used.
+
+message Example {
+  Features features = 1;
+}
+
+// A SequenceExample is an Example representing one or more sequences, and
+// some context.  The context contains features which apply to the entire
+// example. The feature_lists contain a key, value map where each key is
+// associated with a repeated set of Features (a FeatureList).
+// A FeatureList thus represents the values of a feature identified by its key
+// over time / frames.
+//
+// Below is a SequenceExample for a movie recommendation application recording a
+// sequence of ratings by a user. The time-independent features ("locale",
+// "age", "favorites") describing the user are part of the context. The sequence
+// of movies the user rated are part of the feature_lists. For each movie in the
+// sequence we have information on its name and actors and the user's rating.
+// This information is recorded in three separate feature_list(s).
+// In the example below there are only two movies. All three feature_list(s),
+// namely "movie_ratings", "movie_names", and "actors" have a feature value for
+// both movies. Note, that "actors" is itself a bytes_list with multiple
+// strings per movie.
+//
+// context: {
+//   feature: {
+//     key  : "locale"
+//     value: {
+//       bytes_list: {
+//         value: [ "pt_BR" ]
+//       }
+//     }
+//   }
+//   feature: {
+//     key  : "age"
+//     value: {
+//       float_list: {
+//         value: [ 19.0 ]
+//       }
+//     }
+//   }
+//   feature: {
+//     key  : "favorites"
+//     value: {
+//       bytes_list: {
+//         value: [ "Majesty Rose", "Savannah Outen", "One Direction" ]
+//       }
+//     }
+//   }
+// }
+// feature_lists: {
+//   feature_list: {
+//     key  : "movie_ratings"
+//     value: {
+//       feature: {
+//         float_list: {
+//           value: [ 4.5 ]
+//         }
+//       }
+//       feature: {
+//         float_list: {
+//           value: [ 5.0 ]
+//         }
+//       }
+//     }
+//   }
+//   feature_list: {
+//     key  : "movie_names"
+//     value: {
+//       feature: {
+//         bytes_list: {
+//           value: [ "The Shawshank Redemption" ]
+//         }
+//       }
+//       feature: {
+//         bytes_list: {
+//           value: [ "Fight Club" ]
+//         }
+//       }
+//     }
+//   }
+//   feature_list: {
+//     key  : "actors"
+//     value: {
+//       feature: {
+//         bytes_list: {
+//           value: [ "Tim Robbins", "Morgan Freeman" ]
+//         }
+//       }
+//       feature: {
+//         bytes_list: {
+//           value: [ "Brad Pitt", "Edward Norton", "Helena Bonham Carter" ]
+//         }
+//       }
+//     }
+//   }
+// }
+//
+// A conformant SequenceExample data set obeys the following conventions:
+//
+// Context:
+//   - All conformant context features K must obey the same conventions as
+//     a conformant Example's features (see above).
+// Feature lists:
+//   - A FeatureList L may be missing in an example; it is up to the
+//     parser configuration to determine if this is allowed or considered
+//     an empty list (zero length).
+//   - If a FeatureList L exists, it may be empty (zero length).
+//   - If a FeatureList L is non-empty, all features within the FeatureList
+//     must have the same data type T. Even across SequenceExamples, the type T
+//     of the FeatureList identified by the same key must be the same. An entry
+//     without any values may serve as an empty feature.
+//   - If a FeatureList L is non-empty, it is up to the parser configuration
+//     to determine if all features within the FeatureList must
+//     have the same size.  The same holds for this FeatureList across multiple
+//     examples.
+//   - For sequence modeling, e.g.:
+//        http://colah.github.io/posts/2015-08-Understanding-LSTMs/
+//        https://github.com/tensorflow/nmt
+//     the feature lists represent a sequence of frames.
+//     In this scenario, all FeatureLists in a SequenceExample have the same
+//     number of Feature messages, so that the ith element in each FeatureList
+//     is part of the ith frame (or time step).
+// Examples of conformant and non-conformant examples' FeatureLists:
+//
+// Conformant FeatureLists:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } } }
+//    } }
+//
+// Non-conformant FeatureLists (mismatched types):
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { int64_list: { value: [ 5 ] } } }
+//    } }
+//
+// Conditionally conformant FeatureLists, the parser configuration determines
+// if the feature sizes must match:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0, 6.0 ] } } }
+//    } }
+//
+// Conformant pair of SequenceExample
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } } }
+//    } }
+// and:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } }
+//               feature: { float_list: { value: [ 2.0 ] } } }
+//    } }
+//
+// Conformant pair of SequenceExample
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } } }
+//    } }
+// and:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { }
+//    } }
+//
+// Conditionally conformant pair of SequenceExample, the parser configuration
+// determines if the second feature_lists is consistent (zero-length) or
+// invalid (missing "movie_ratings"):
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } } }
+//    } }
+// and:
+//    feature_lists: { }
+//
+// Non-conformant pair of SequenceExample (mismatched types)
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } } }
+//    } }
+// and:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { int64_list: { value: [ 4 ] } }
+//               feature: { int64_list: { value: [ 5 ] } }
+//               feature: { int64_list: { value: [ 2 ] } } }
+//    } }
+//
+// Conditionally conformant pair of SequenceExample; the parser configuration
+// determines if the feature sizes must match:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.5 ] } }
+//               feature: { float_list: { value: [ 5.0 ] } } }
+//    } }
+// and:
+//    feature_lists: { feature_list: {
+//      key: "movie_ratings"
+//      value: { feature: { float_list: { value: [ 4.0 ] } }
+//               feature: { float_list: { value: [ 5.0, 3.0 ] } }
+//    } }
+
+message SequenceExample {
+  Features context = 1;
+  FeatureLists feature_lists = 2;
+}
diff --git a/src/main/proto/tensorflow/core/example/feature.proto b/src/main/proto/tensorflow/core/example/feature.proto
new file mode 100644
index 0000000..7f9fad9
--- /dev/null
+++ b/src/main/proto/tensorflow/core/example/feature.proto
@@ -0,0 +1,110 @@
+// Protocol messages for describing features for machine learning model
+// training or inference.
+//
+// There are three base Feature types:
+//   - bytes
+//   - float
+//   - int64
+//
+// A Feature contains Lists which may hold zero or more values.  These
+// lists are the base values BytesList, FloatList, Int64List.
+//
+// Features are organized into categories by name.  The Features message
+// contains the mapping from name to Feature.
+//
+// Example Features for a movie recommendation application:
+//   feature {
+//     key: "age"
+//     value { float_list {
+//       value: 29.0
+//     }}
+//   }
+//   feature {
+//     key: "movie"
+//     value { bytes_list {
+//       value: "The Shawshank Redemption"
+//       value: "Fight Club"
+//     }}
+//   }
+//   feature {
+//     key: "movie_ratings"
+//     value { float_list {
+//       value: 9.0
+//       value: 9.7
+//     }}
+//   }
+//   feature {
+//     key: "suggestion"
+//     value { bytes_list {
+//       value: "Inception"
+//     }}
+//   }
+//   feature {
+//     key: "suggestion_purchased"
+//     value { int64_list {
+//       value: 1
+//     }}
+//   }
+//   feature {
+//     key: "purchase_price"
+//     value { float_list {
+//       value: 9.99
+//     }}
+//   }
+//
+
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "FeatureProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.example";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/example/example_protos_go_proto";
+
+// LINT.IfChange
+// Containers to hold repeated fundamental values.
+message BytesList {
+  repeated bytes value = 1;
+}
+message FloatList {
+  repeated float value = 1 [packed = true];
+}
+message Int64List {
+  repeated int64 value = 1 [packed = true];
+}
+
+// Containers for non-sequential data.
+message Feature {
+  // Each feature can be exactly one kind.
+  oneof kind {
+    BytesList bytes_list = 1;
+    FloatList float_list = 2;
+    Int64List int64_list = 3;
+  }
+}
+
+message Features {
+  // Map from feature name to feature.
+  map<string, Feature> feature = 1;
+}
+
+// Containers for sequential data.
+//
+// A FeatureList contains lists of Features.  These may hold zero or more
+// Feature values.
+//
+// FeatureLists are organized into categories by name.  The FeatureLists message
+// contains the mapping from name to FeatureList.
+//
+message FeatureList {
+  repeated Feature feature = 1;
+}
+
+message FeatureLists {
+  // Map from feature name to feature list.
+  map<string, FeatureList> feature_list = 1;
+}
+// LINT.ThenChange(
+//     https://www.tensorflow.org/code/tensorflow/python/training/training.py)
diff --git a/src/main/proto/tensorflow/core/framework/allocation_description.proto b/src/main/proto/tensorflow/core/framework/allocation_description.proto
new file mode 100644
index 0000000..f18caa4
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/allocation_description.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "AllocationDescriptionProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/allocation_description_go_proto";
+
+message AllocationDescription {
+  // Total number of bytes requested
+  int64 requested_bytes = 1;
+
+  // Total number of bytes allocated if known
+  int64 allocated_bytes = 2;
+
+  // Name of the allocator used
+  string allocator_name = 3;
+
+  // Identifier of the allocated buffer if known
+  int64 allocation_id = 4;
+
+  // Set if this tensor only has one remaining reference
+  bool has_single_reference = 5;
+
+  // Address of the allocation.
+  uint64 ptr = 6;
+}
diff --git a/src/main/proto/tensorflow/core/framework/attr_value.proto b/src/main/proto/tensorflow/core/framework/attr_value.proto
new file mode 100644
index 0000000..2bd5b55
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/attr_value.proto
@@ -0,0 +1,64 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/tensor.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "AttrValueProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/attr_value_go_proto";
+
+// Protocol buffer representing the value for an attr used to configure an Op.
+// Comment indicates the corresponding attr type.  Only the field matching the
+// attr type may be filled.
+message AttrValue {
+  // LINT.IfChange
+  message ListValue {
+    repeated bytes s = 2;                        // "list(string)"
+    repeated int64 i = 3 [packed = true];        // "list(int)"
+    repeated float f = 4 [packed = true];        // "list(float)"
+    repeated bool b = 5 [packed = true];         // "list(bool)"
+    repeated DataType type = 6 [packed = true];  // "list(type)"
+    repeated TensorShapeProto shape = 7;         // "list(shape)"
+    repeated TensorProto tensor = 8;             // "list(tensor)"
+    repeated NameAttrList func = 9;              // "list(attr)"
+  }
+  // LINT.ThenChange(//tensorflow/c/c_api.cc)
+
+  oneof value {
+    bytes s = 2;                 // "string"
+    int64 i = 3;                 // "int"
+    float f = 4;                 // "float"
+    bool b = 5;                  // "bool"
+    DataType type = 6;           // "type"
+    TensorShapeProto shape = 7;  // "shape"
+    TensorProto tensor = 8;      // "tensor"
+    ListValue list = 1;          // any "list(...)"
+
+    // "func" represents a function. func.name is a function's name or
+    // a primitive op's name. func.attr.first is the name of an attr
+    // defined for that function. func.attr.second is the value for
+    // that attr in the instantiation.
+    NameAttrList func = 10;
+
+    // This is a placeholder only used in nodes defined inside a
+    // function.  It indicates the attr value will be supplied when
+    // the function is instantiated.  For example, let us suppose a
+    // node "N" in function "FN". "N" has an attr "A" with value
+    // placeholder = "foo". When FN is instantiated with attr "foo"
+    // set to "bar", the instantiated node N's attr A will have been
+    // given the value "bar".
+    string placeholder = 9;
+  }
+}
+
+// A list of attr names and their values. The whole list is attached
+// with a string name.  E.g., MatMul[T=float].
+message NameAttrList {
+  string name = 1;
+  map<string, AttrValue> attr = 2;
+}
diff --git a/src/main/proto/tensorflow/core/framework/cost_graph.proto b/src/main/proto/tensorflow/core/framework/cost_graph.proto
new file mode 100644
index 0000000..42c9e23
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/cost_graph.proto
@@ -0,0 +1,89 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "CostGraphProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/cost_graph_go_proto";
+
+message CostGraphDef {
+  message Node {
+    // The name of the node. Names are globally unique.
+    string name = 1;
+
+    // The device of the node. Can be empty if the node is mapped to the
+    // default partition or partitioning hasn't been run yet.
+    string device = 2;
+
+    // The id of the node. Node ids are only unique inside a partition.
+    int32 id = 3;
+
+    // Inputs of this node. They must be executed before this node can be
+    // executed. An input is a particular output of another node, specified
+    // by the node id and the output index.
+    message InputInfo {
+      int32 preceding_node = 1;
+      int32 preceding_port = 2;
+    }
+    repeated InputInfo input_info = 4;
+
+    // Outputs of this node.
+    message OutputInfo {
+      int64 size = 1;
+      // If >= 0, the output is an alias of an input. Note that an alias input
+      // may itself be an alias. The algorithm will therefore need to follow
+      // those pointers.
+      int64 alias_input_port = 2;
+      TensorShapeProto shape = 3;
+      DataType dtype = 4;
+    }
+    repeated OutputInfo output_info = 5;
+
+    // Temporary memory used by this node.
+    int64 temporary_memory_size = 6;
+
+    // Persistent memory used by this node.
+    int64 persistent_memory_size = 12;
+
+    int64 host_temp_memory_size = 10 [deprecated = true];
+    int64 device_temp_memory_size = 11 [deprecated = true];
+    int64 device_persistent_memory_size = 16 [deprecated = true];
+
+    // Estimate of the computational cost of this node, in microseconds.
+    int64 compute_cost = 9;
+
+    // Analytical estimate of the computational cost of this node, in
+    // microseconds.
+    int64 compute_time = 14;
+
+    // Analytical estimate of the memory access cost of this node, in
+    // microseconds.
+    int64 memory_time = 15;
+
+    // If true, the output is permanent: it can't be discarded, because this
+    // node is part of the "final output". Nodes may depend on final nodes.
+    bool is_final = 7;
+
+    // Ids of the control inputs for this node.
+    repeated int32 control_input = 8;
+
+    // Are the costs inaccurate?
+    bool inaccurate = 17;
+  }
+  repeated Node node = 1;
+
+  // Total cost of this graph, typically used for balancing decisions.
+  message AggregatedCost {
+    // Aggregated cost value.
+    float cost = 1;
+
+    // Aggregated cost dimension (e.g. 'memory', 'compute', 'network').
+    string dimension = 2;
+  }
+  repeated AggregatedCost cost = 2;
+}
diff --git a/src/main/proto/tensorflow/core/framework/full_type.proto b/src/main/proto/tensorflow/core/framework/full_type.proto
new file mode 100644
index 0000000..19e8da5
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/full_type.proto
@@ -0,0 +1,310 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "FullTypeProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/full_type_go_proto";
+
+// LINT.IfChange
+// Experimental. Represents the complete type information of a TensorFlow value.
+enum FullTypeId {
+  // The default represents an uninitialized value.
+  TFT_UNSET = 0;
+
+  // Type symbols. Used to construct more complex type expressions like
+  // algebraic data types.
+
+  // Type variables may serve as placeholder for any other type ID in type
+  // templates.
+  //
+  // Examples:
+  //   TFT_DATASET[TFT_VAR["T"]] is a Dataset returning a type indicated by "T".
+  //   TFT_TENSOR[TFT_VAR["T"]] is a Tensor of an element type indicated by "T".
+  //   TFT_TENSOR[TFT_VAR["T"]], TFT_TENSOR[TFT_VAR["T"]] are two tensors of
+  //     identical element types.
+  //   TFT_TENSOR[TFT_VAR["P"]], TFT_TENSOR[TFT_VAR["Q"]] are two tensors of
+  //     independent element types.
+  //
+  TFT_VAR = 1;
+
+  // Wildcard type. Describes a parameter of unknown type. In TensorFlow, that
+  // can mean either a "Top" type (accepts any type), or a dynamically typed
+  // object whose type is unknown in context.
+  // Important: "unknown" does not necessarily mean undeterminable!
+  TFT_ANY = 2;
+
+  // The algebraic product type. This is an algebraic type that may be used just
+  // for logical grouping. Not to be confused with TFT_TUPLE which describes a
+  // concrete object of several elements.
+  //
+  // Example:
+  //   TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT64]]]
+  //     is a Dataset producing two tensors, an integer one and a float one.
+  //
+  TFT_PRODUCT = 3;
+
+  // Represents a named field, with the name stored in the attribute.
+  //
+  // Parametrization:
+  //   TFT_NAMED[<type>]{<name>}
+  //   * <type> is the type of the field
+  //   * <name> is the field name, as string (though can theoretically be an int
+  //     as well)
+  //
+  // Example:
+  //   TFT_RECORD[
+  //     TFT_NAMED[TFT_TENSOR[TFT_INT32]]{'foo'},
+  //     TFT_NAMED[TFT_TENSOR[TFT_FLOAT32]]{'bar'},
+  //   ]
+  //     is a structure with two fields, an int tensor "foo" and a float tensor
+  //     "bar".
+  TFT_NAMED = 4;
+
+  // Template definition. Expands the variables by repeating a template as
+  // arguments of container.
+  //
+  // Parametrization:
+  //   TFT_FOR_EACH[<container_type>, <template>, <expansions>]
+  //   * <container_type> is the type of the container that the template will be
+  //     expanded into
+  //   * <template> is any type definition that potentially contains type
+  //     variables
+  //   * <expansions> is a TFT_VAR and may include more types in the future
+  //
+  // Example:
+  //   TFT_FOR_EACH[
+  //         TFT_PRODUCT,
+  //         TFT_TENSOR[TFT_VAR["t"]],
+  //         TFT_VAR["t"]
+  //     ]
+  //     will substitute a T = TFT_INT32 to TFT_PRODUCT[TFT_TENSOR[TFT_INT32]]
+  //     and a T = (TFT_INT32, TFT_INT64) to
+  //     TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_INT64]].
+  TFT_FOR_EACH = 20;
+
+  // Callable types describe functions and ops.
+  //
+  // Parametrization:
+  //   TFT_CALLABLE[<arg type>, <return type>]
+  //   * <arg type> is the type of the arguments; TFT_PRODUCT represents
+  //   multiple
+  //     arguments.
+  //   * <return type> is the return type; TFT_PRODUCT represents multiple
+  //     return values (that means that callables returning multiple things
+  //     don't necessarily return a single tuple).
+  //
+  // Example:
+  //   TFT_CALLABLE[
+  //     TFT_ANY,
+  //     TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT64]],
+  //   ]
+  //     is a callable with unspecified (for now) input arguments, and
+  //     two return values of type tensor.
+  //
+  TFT_CALLABLE = 100;
+
+  // Concrete type IDs, representing "proper" data types that can describe
+  // runtime TensorFlow objects.
+
+  // The usual Tensor. This is a parametric type.
+  //
+  // Parametrization:
+  //   TFT_TENSOR[<element type>, <shape type>]
+  //   * <element type> is currently limited to one of the element types
+  //     defined below.
+  //   * <shape type> is not yet defined, and may only be TFT_UNKNOWN for now.
+  //
+  // A TFT_SHAPE type will be defined in the future.
+  //
+  // Example:
+  //   TFT_TENSOR[TFT_INT32, TFT_UNKNOWN]
+  //     is a Tensor of int32 element type and unknown shape.
+  //
+  // TODO(mdan): Define TFT_SHAPE and add more examples.
+  TFT_TENSOR = 1000;
+
+  // Array (or tensorflow::TensorList in the variant type registry).
+  // Note: this is not to be confused with the deprecated `TensorArray*` ops
+  // which are not supported by FullType.
+  // This type represents a random-access list whose elements can be
+  // described by a single type. Although immutable, Array is expected to
+  // support efficient mutation semantics (i.e. element update) in the
+  // user-facing API.
+  // The element type may be generic or even TFT_ANY for a heterogeneous list.
+  //
+  // Parametrization:
+  //   TFT_ARRAY[<element type>]
+  //   * <element type> may be any concrete type.
+  //
+  // Examples:
+  //   TFT_ARRAY[TFT_TENSOR[TFT_INT32]] is a TensorArray holding int32 Tensors
+  //     of any shape.
+  //   TFT_ARRAY[TFT_TENSOR[TFT_UNKNOWN]] is a TensorArray holding Tensors of
+  //     mixed element types.
+  //   TFT_ARRAY[TFT_UNKNOWN] is a TensorArray holding any element type.
+  //   TFT_ARRAY[] is equivalent to TFT_ARRAY[TFT_UNKNOWN].
+  //   TFT_ARRAY[TFT_ARRAY[]] is an array of arrays (of unknown types).
+  TFT_ARRAY = 1001;
+
+  // Optional (or tensorflow::OptionalVariant in the variant type registry).
+  // This type represents a value that may either hold an element of a single
+  // specified type, or nothing at all.
+  //
+  // Parametrization:
+  //   TFT_OPTIONAL[<element type>]
+  //   * <element type> may be any concrete type.
+  //
+  // Examples:
+  //   TFT_OPTIONAL[TFT_TENSOR[TFT_INT32]] is an Optional holding an int32
+  //     Tensor of any shape.
+  TFT_OPTIONAL = 1002;
+
+  // Literal types describe compile-time constant values.
+  // Literal types may also participate in dependent types.
+  //
+  // Parametrization:
+  //   TFT_LITERAL[<value type>]{<value>}
+  //   * <value type> may be any concrete type that can hold <value>
+  //   * <value> is the type's attribute, and holds the actual literal value
+  //
+  // Examples:
+  //   TFT_LITERAL[TFT_INT32]{1} is the compile-time constant 1.
+  TFT_LITERAL = 1003;
+
+  // Encoding types describe a value of a certain type, encoded as a different
+  // type.
+  //
+  // Parametrization:
+  //   TFT_ENCODED[<encoded type>, <encoding type>]
+  //   * <encoded type> may be any type
+  //   * <encoding type> may be any type
+  //
+  // Examples:
+  //   TFT_ENCODED[TFT_INT32, TFT_STRING] is an integer encoded as a string.
+  TFT_ENCODED = 1004;
+
+  // The type of "shape tensors" where the runtime value is the shape of
+  // some tensor(s), i.e. the output of tf.shape.
+  // Shape tensors have special, host-only placement, in contrast to
+  // TFT_TENSOR[TFT_INT32] which is the type of a normal numeric tensor
+  // with no special placement.
+  //
+  // Examples:
+  //   TFT_SHAPE_TENSOR[TFT_INT32] is the most common
+  //   TFT_SHAPE_TENSOR[TFT_INT64] is also allowed
+  TFT_SHAPE_TENSOR = 1005;
+
+  // Type attributes. These always appear in the parametrization of a type,
+  // never alone. For example, there is no such thing as a "bool" TensorFlow
+  // object (for now).
+
+  // The bool element type.
+  // TODO(mdan): Quantized types, legacy representations (e.g. ref)
+  TFT_BOOL = 200;
+  // Integer element types.
+  TFT_UINT8 = 201;
+  TFT_UINT16 = 202;
+  TFT_UINT32 = 203;
+  TFT_UINT64 = 204;
+  TFT_INT8 = 205;
+  TFT_INT16 = 206;
+  TFT_INT32 = 207;
+  TFT_INT64 = 208;
+  // Floating-point element types.
+  TFT_HALF = 209;
+  TFT_FLOAT = 210;
+  TFT_DOUBLE = 211;
+  TFT_BFLOAT16 = 215;
+  // Complex element types.
+  // TODO(mdan): Represent as TFT_COMPLEX[TFT_DOUBLE] instead?
+  TFT_COMPLEX64 = 212;
+  TFT_COMPLEX128 = 213;
+  // The string element type.
+  TFT_STRING = 214;
+
+  // Other types that we don't know yet whether they will become part of the
+  // core type system or be considered third-party (and consequently moved to
+  // user-defined type mechanisms). Presently, they are effectively in the core
+  // type system, because key compilation passes like Placer account for their
+  // existence.
+
+  // Datasets created by tf.data ops and APIs. Datasets have generator/iterable
+  // semantics, that is, one can construct an iterator from them. Like
+  // Array, they are considered to return elements that can be described
+  // by a single type. Unlike Array, they do not support random access or
+  // mutation, and can potentially produce an infinite number of elements.
+  // A dataset can produce logical structures (e.g. multiple elements). This
+  // is expressed using TFT_PRODUCT.
+  //
+  //
+  // Parametrization: TFT_DATASET[<element type>].
+  //   * <element type> may be a concrete type or a type symbol. It represents
+  //     the data type of the elements produced by the dataset.
+  //
+  // Examples:
+  //   TFT_DATASET[TFT_TENSOR[TFT_INT32]] is a Dataset producing single int32
+  //     Tensors of unknown shape.
+  //   TFT_DATASET[TFT_PRODUCT[TFT_TENSOR[TFT_INT32], TFT_TENSOR[TFT_FLOAT32]]] is
+  //     a Dataset producing pairs of Tensors, one integer and one float.
+  // Note: The high ID number is to prepare for the eventuality that Datasets
+  // will be supported by user types in the future.
+  TFT_DATASET = 10102;
+
+  // A ragged tensor created by tf.ragged ops and APIs.
+  //
+  // Parametrization: TFT_RAGGED[<element_type>].
+  TFT_RAGGED = 10103;
+
+  // Iterators created by tf.data ops and APIs. Very similar to Datasets, except
+  // they are mutable.
+  //
+  //
+  // Parametrization: TFT_ITERATOR[<element type>].
+  //   * <element type> may be a concrete type or a type symbol. It represents
+  //     the data type of the elements produced by the dataset.
+  TFT_ITERATOR = 10104;
+
+  // A mutex lock tensor, produced by tf.raw_ops.MutexLock.
+  // Unlike strict execution models, where ownership of a lock is denoted by
+  // "running after the lock has been acquired", in non-strict mode, lock
+  // ownership is in the true sense: "the op argument representing the lock is
+  // available".
+  // Mutex locks are the dynamic counterpart of control dependencies.
+  // TODO(mdan): Properly document this thing.
+  //
+  // Parametrization: TFT_MUTEX_LOCK[].
+  TFT_MUTEX_LOCK = 10202;
+
+  // The equivalent of a Tensor with DT_VARIANT dtype, kept here to simplify
+  // translation. This type should not normally appear after type inference.
+  // Note that LEGACY_VARIANT != ANY: TENSOR[INT32] is a subtype of ANY, but is
+  // not a subtype of LEGACY_VARIANT.
+  TFT_LEGACY_VARIANT = 10203;
+}
+
+// Highly experimental and very likely to change.
+// This encoding uses tags instead of dedicated messages for regularity. In
+// particular the encoding imposes no restrictions on what the parameters of any
+// type should be, which in particular needs to be true for type symbols.
+message FullTypeDef {
+  // The principal type represented by this object. This may be a concrete type
+  // (Tensor, Dataset) a type variable (used for dependent types) a type
+  // symbol (Any, Union). See FullTypeId for details.
+  FullTypeId type_id = 1;
+
+  repeated FullTypeDef args = 2;
+
+  // Literal values of this type object, if the type admits one.
+  // For example, a type variable admits a string attribute - its name.
+  // Shape-related types may admit int attributes - their static shape values.
+  // Fields for more data types to be added as needed.
+  oneof attr {
+    string s = 3;
+    int64 i = 4;
+    // TODO(mdan): list/tensor, map? Need to reconcile with TFT_RECORD, etc.
+  }
+}
+// LINT.ThenChange(../ir/types/attributes.td)
diff --git a/src/main/proto/tensorflow/core/framework/function.proto b/src/main/proto/tensorflow/core/framework/function.proto
new file mode 100644
index 0000000..6e59df7
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/function.proto
@@ -0,0 +1,136 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/attr_value.proto";
+import "tensorflow/core/framework/node_def.proto";
+import "tensorflow/core/framework/op_def.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "FunctionProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/function_go_proto";
+
+// A library is a set of named functions.
+message FunctionDefLibrary {
+  repeated FunctionDef function = 1;
+  repeated GradientDef gradient = 2;
+  repeated RegisteredGradient registered_gradients = 3;
+}
+
+// A function can be instantiated when the runtime can bind every attr
+// with a value. When a GraphDef has a call to a function, it must
+// have binding for every attr defined in the signature.
+//
+// TODO(zhifengc):
+//   * device spec, etc.
+message FunctionDef {
+  // The definition of the function's name, arguments, return values,
+  // attrs etc.
+  OpDef signature = 1;
+
+  // Attributes specific to this function definition.
+  map<string, AttrValue> attr = 5;
+
+  // Attributes for function arguments. These attributes are the same set of
+  // valid attributes as to _Arg nodes.
+  message ArgAttrs {
+    map<string, AttrValue> attr = 1;
+  }
+  map<uint32, ArgAttrs> arg_attr = 7;
+
+  // Unique IDs for each resource argument, used to track aliasing resources. If
+  // Argument A and Argument B alias each other, then
+  // resource_arg_unique_ids[A.index] == resource_arg_unique_ids[B.index].
+  //
+  // If this field is empty, none of the arguments could alias; otherwise, every
+  // resource argument should have an entry in this field.
+  //
+  // When instantiated, the unique IDs will be attached to the _Arg nodes'
+  // "_resource_arg_unique_id" attribute.
+  map<uint32, uint32> resource_arg_unique_id = 8;
+
+  // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21.
+  reserved 2;
+
+  // In both of the following fields, there is the need to specify an
+  // output that is used as either the input to another node (in
+  // `node_def`) or as a return value of the function (in `ret`).
+  // Unlike the NodeDefs in GraphDef, we need to be able to specify a
+  // list in some cases (instead of just single outputs).  Also, we
+  // need to be able to deal with lists of unknown length (so the
+  // output index may not be known at function definition time).  So
+  // we use the following format instead:
+  // * "fun_in" where "fun_in" is the name of a function input arg in
+  //   the `signature` field above.  This represents that input, whether
+  //   it is a single tensor or a list.
+  // * "fun_in:0" gives the first element of a function input arg (a
+  //   non-list input is considered a list of length 1 for these
+  //   purposes).
+  // * "node:out" where "node" is the name of a node in `node_def` and
+  //   "out" is the name one of its op's output arguments (the name
+  //   comes from the OpDef of the node's op). This represents that
+  //   node's output, whether it is a single tensor or a list.
+  //   Note: We enforce that an op's output arguments are never
+  //   renamed in the backwards-compatibility test.
+  // * "node:out:0" gives the first element of a node output arg (a
+  //   non-list output is considered a list of length 1 for these
+  //   purposes).
+  //
+  // NOT CURRENTLY SUPPORTED (but may be in the future):
+  // * "node:out:-1" gives last element in a node output list
+  // * "node:out:1:" gives a list with all but the first element in a
+  //   node output list
+  // * "node:out::-1" gives a list with all but the last element in a
+  //   node output list
+
+  // The body of the function.  Unlike the NodeDefs in a GraphDef, attrs
+  // may have values of type `placeholder` and the `input` field uses
+  // the "output" format above.
+
+  // By convention, "op" in node_def is resolved by consulting with a
+  // user-defined library first. If not resolved, "func" is assumed to
+  // be a builtin op.
+  repeated NodeDef node_def = 3;
+
+  // A mapping from the output arg names from `signature` to the
+  // outputs from `node_def` that should be returned by the function.
+  map<string, string> ret = 4;
+
+  // A mapping from control output names from `signature` to node names in
+  // `node_def` which should be control outputs of this function.
+  map<string, string> control_ret = 6;
+}
+
+// GradientDef defines the gradient function of a function defined in
+// a function library.
+//
+// A gradient function g (specified by gradient_func) for a function f
+// (specified by function_name) must follow the following:
+//
+// The function 'f' must be a numerical function which takes N inputs
+// and produces M outputs. Its gradient function 'g' is a
+// function taking N + M inputs and producing N outputs.
+//
+// I.e. if we have
+//    (y1, y2, ..., y_M) = f(x1, x2, ..., x_N),
+// then, g is
+//    (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N,
+//                                      dL/dy1, dL/dy2, ..., dL/dy_M),
+// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the
+// loss function). dL/dx_i is the partial derivative of L with respect
+// to x_i.
+message GradientDef {
+  string function_name = 1;  // The function name.
+  string gradient_func = 2;  // The gradient function's name.
+}
+
+// RegisteredGradient stores a gradient function that is registered in the
+// gradients library and used in the ops of a function in the function library.
+// Unlike GradientDef, these gradients are identified by op type, and not
+// directly linked to any function.
+message RegisteredGradient {
+  string gradient_func = 1;       // The gradient function's name.
+  string registered_op_type = 2;  // The gradient function's registered op type.
+}
diff --git a/src/main/proto/tensorflow/core/framework/graph.proto b/src/main/proto/tensorflow/core/framework/graph.proto
new file mode 100644
index 0000000..8523f13
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/graph.proto
@@ -0,0 +1,60 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/function.proto";
+import "tensorflow/core/framework/graph_debug_info.proto";
+import "tensorflow/core/framework/node_def.proto";
+import "tensorflow/core/framework/versions.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "GraphProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/graph_go_proto";
+
+// Represents the graph of operations
+message GraphDef {
+  repeated NodeDef node = 1;
+
+  // Compatibility versions of the graph.  See core/public/version.h for version
+  // history.  The GraphDef version is distinct from the TensorFlow version, and
+  // each release of TensorFlow will support a range of GraphDef versions.
+  VersionDef versions = 4;
+
+  // Deprecated single version field; use versions above instead.  Since all
+  // GraphDef changes before "versions" was introduced were forward
+  // compatible, this field is entirely ignored.
+  int32 version = 3 [deprecated = true];
+
+  // "library" provides user-defined functions.
+  //
+  // Naming:
+  //   * library.function.name are in a flat namespace.
+  //     NOTE: We may need to change it to be hierarchical to support
+  //     different orgs. E.g.,
+  //     { "/google/nn", { ... }},
+  //     { "/google/vision", { ... }}
+  //     { "/org_foo/module_bar", { ... }}
+  //     map<string, FunctionDefLib> named_lib;
+  //   * If node[i].op is the name of one function in "library",
+  //     node[i] is deemed as a function call. Otherwise, node[i].op
+  //     must be a primitive operation supported by the runtime.
+  //
+  //
+  // Function call semantics:
+  //
+  //   * The callee may start execution as soon as some of its inputs
+  //     are ready. The caller may want to use Tuple() mechanism to
+  //     ensure all inputs are ready at the same time.
+  //
+  //   * The consumer of return values may start executing as soon as
+  //     the return values the consumer depends on are ready.  The
+  //     consumer may want to use Tuple() mechanism to ensure the
+  //     consumer does not start until all return values of the callee
+  //     function are ready.
+  FunctionDefLibrary library = 2;
+
+  // Stack traces for the nodes in this graph.
+  GraphDebugInfo debug_info = 5;
+}
diff --git a/src/main/proto/tensorflow/core/framework/graph_debug_info.proto b/src/main/proto/tensorflow/core/framework/graph_debug_info.proto
new file mode 100644
index 0000000..9a49344
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/graph_debug_info.proto
@@ -0,0 +1,61 @@
+syntax = "proto2";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "GraphDebugInfoProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+message GraphDebugInfo {
+  // This represents a file/line location in the source code.
+  message FileLineCol {
+    // File name index, which can be used to retrieve the file name string from
+    // `files`. The value should be between 0 and (len(files)-1)
+    optional int32 file_index = 1;
+
+    // Line number in the file.
+    optional int32 line = 2;
+
+    // Col number in the file line.
+    optional int32 col = 3;
+
+    // Name of the function containing the file line.
+    optional string func = 4;
+
+    // Source code contained in this file line.
+    optional string code = 5;
+  }
+
+  // This represents a stack trace which is an ordered list of `FileLineCol`.
+  message StackTrace {
+    repeated FileLineCol file_line_cols = 1;  // Deprecated.
+    repeated fixed64 frame_id = 2 [packed = true];
+  }
+
+  // This stores all the source code file names and can be indexed by the
+  // `file_index`.
+  repeated string files = 1;
+
+  // Stack traces and frames are uniqueified during construction. These maps
+  // index from the unique id for a frame/trace to the value.
+  map<fixed64, FileLineCol> frames_by_id = 4;
+  map<fixed64, StackTrace> traces_by_id = 6;
+
+  map<string, StackTrace> traces = 2;  // Deprecated.
+
+  // This maps a node name to a trace id contained in `traces_by_id`.
+  //
+  // The map key is a mangling of the containing function and op name with
+  // syntax:
+  //   op.name '@' func_name
+  // For ops in the top-level graph, the func_name is the empty string and hence
+  // the `@` may be omitted.
+  // Note that op names are restricted to a small number of characters which
+  // exclude '@', making it impossible to collide keys of this form. Function
+  // names accept a much wider set of characters.
+  // It would be preferable to avoid mangling and use a tuple key of (op.name,
+  // func_name), but this is not supported with protocol buffers.
+  map<string, fixed64> name_to_trace_id = 5;
+}
diff --git a/src/main/proto/tensorflow/core/framework/node_def.proto b/src/main/proto/tensorflow/core/framework/node_def.proto
new file mode 100644
index 0000000..705e90a
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/node_def.proto
@@ -0,0 +1,95 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/attr_value.proto";
+import "tensorflow/core/framework/full_type.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "NodeProto";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/node_def_go_proto";
+
+// A single node (operation instance) in a GraphDef: names the op to run,
+// its inputs, an optional device constraint, and op-specific attrs.
+message NodeDef {
+  // The name given to this operator. Used for naming inputs,
+  // logging, visualization, etc.  Unique within a single GraphDef.
+  // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_>./]*".
+  string name = 1;
+
+  // The operation name.  There may be custom parameters in attrs.
+  // Op names starting with an underscore are reserved for internal use.
+  string op = 2;
+
+  // Each input is "node:src_output" with "node" being a string name and
+  // "src_output" indicating which output tensor to use from "node". If
+  // "src_output" is 0 the ":0" suffix can be omitted.  Regular inputs
+  // may optionally be followed by control inputs that have the format
+  // "^node".
+  repeated string input = 3;
+
+  // A (possibly partial) specification for the device on which this
+  // node should be placed.
+  // The expected syntax for this string is as follows:
+  //
+  // DEVICE_SPEC ::= PARTIAL_SPEC
+  //
+  // PARTIAL_SPEC ::= ("/" CONSTRAINT) *
+  // CONSTRAINT ::= ("job:" JOB_NAME)
+  //              | ("replica:" [1-9][0-9]*)
+  //              | ("task:" [1-9][0-9]*)
+  //              | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") )
+  //
+  // Valid values for this string include:
+  // * "/job:worker/replica:0/task:1/device:GPU:3"  (full specification)
+  // * "/job:worker/device:GPU:3"                   (partial specification)
+  // * ""                                    (no specification)
+  //
+  // If the constraints do not resolve to a single device (or if this
+  // field is empty or not present), the runtime will attempt to
+  // choose a device automatically.
+  string device = 4;
+
+  // Operation-specific graph-construction-time configuration.
+  // Note that this should include all attrs defined in the
+  // corresponding OpDef, including those with a value matching
+  // the default -- this allows the default to change and makes
+  // NodeDefs easier to interpret on their own.  However, if
+  // an attr with a default is not specified in this list, the
+  // default will be used.
+  // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and
+  // one of the names from the corresponding OpDef's attr field).
+  // The values must have a type matching the corresponding OpDef
+  // attr's type field.
+  // TODO(josh11b): Add some examples here showing best practices.
+  map<string, AttrValue> attr = 5;
+
+  message ExperimentalDebugInfo {
+    // Opaque string inserted into error messages created by the runtime.
+    //
+    // This is intended to store the list of names of the nodes from the
+    // original graph that this node was derived. For example if this node, say
+    // C, was result of a fusion of 2 nodes A and B, then 'original_node' would
+    // be {A, B}. This information can be used to map errors originating at the
+    // current node to some top level source code.
+    repeated string original_node_names = 1;
+
+    // This is intended to store the list of names of the functions from the
+    // original graph that this node was derived. For example if this node, say
+    // C, was result of a fusion of node A in function FA and node B in function
+    // FB, then `original_funcs` would be {FA, FB}. If the node is in the top
+    // level graph, the `original_func` is empty. This information, with the
+    // `original_node_names` can be used to map errors originating at the
+    // current node to some top level source code.
+    repeated string original_func_names = 2;
+  }
+
+  // This stores debug information associated with the node.
+  ExperimentalDebugInfo experimental_debug_info = 6;
+
+  // The complete type of this node. Experimental and subject to change.
+  // Currently, the field only contains the return types of the node. That will
+  // extend in the future to contain the entire signature of the node, as a
+  // function type.
+  FullTypeDef experimental_type = 7;
+}
diff --git a/src/main/proto/tensorflow/core/framework/op_def.proto b/src/main/proto/tensorflow/core/framework/op_def.proto
new file mode 100644
index 0000000..b71f5ce
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/op_def.proto
@@ -0,0 +1,193 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/attr_value.proto";
+import "tensorflow/core/framework/full_type.proto";
+import "tensorflow/core/framework/resource_handle.proto";
+import "tensorflow/core/framework/types.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "OpDefProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/op_def_go_proto";
+
+// Defines an operation. A NodeDef in a GraphDef specifies an Op by
+// using the "op" field which should match the name of an OpDef.
+// LINT.IfChange
+message OpDef {
+  // Op names starting with an underscore are reserved for internal use.
+  // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9>_]*".
+  string name = 1;
+
+  // For describing inputs and outputs.
+  message ArgDef {
+    // Name for the input/output.  Should match the regexp "[a-z][a-z0-9_]*".
+    string name = 1;
+
+    // Human readable description.
+    string description = 2;
+
+    // Describes the type of one or more tensors that are accepted/produced
+    // by this input/output arg.  The only legal combinations are:
+    // * For a single tensor: either the "type" field is set or the
+    //   "type_attr" field is set to the name of an attr with type "type".
+    // * For a sequence of tensors with the same type: the "number_attr"
+    //   field will be set to the name of an attr with type "int", and
+    //   either the "type" or "type_attr" field will be set as for
+    //   single tensors.
+    // * For a sequence of tensors, the "type_list_attr" field will be set
+    //   to the name of an attr with type "list(type)".
+    DataType type = 3;
+    string type_attr = 4;    // if specified, attr must have type "type"
+    string number_attr = 5;  // if specified, attr must have type "int"
+    // If specified, attr must have type "list(type)", and none of
+    // type, type_attr, and number_attr may be specified.
+    string type_list_attr = 6;
+
+    // The handle data for resource inputs.
+    repeated ResourceHandleProto.DtypeAndShape handle_data = 7;
+
+    // For inputs: if true, the inputs are required to be refs.
+    //   By default, inputs can be either refs or non-refs.
+    // For outputs: if true, outputs are refs, otherwise they are not.
+    bool is_ref = 16;
+
+    // Experimental. Full type declaration for this argument.
+    // The full type specification combines type, type_attr, type_list_attr,
+    // etc. into a unified representation.
+    // This declaration may contain non-concrete types (for example,
+    // Tensor<TypeVar<'T'>> is a valid type declaration).
+    //
+    // Note: this is a transient field. The long-term aim is to represent the
+    // entire OpDef as a single type: a callable. In that context, this field is
+    // just the type of a single argument.
+    FullTypeDef experimental_full_type = 17;
+  }
+
+  // Description of the input(s).
+  repeated ArgDef input_arg = 2;
+
+  // Description of the output(s).
+  repeated ArgDef output_arg = 3;
+
+  // Named control outputs for this operation. Useful only for composite
+  // operations (i.e. functions) which want to name different control outputs.
+  repeated string control_output = 20;
+
+  // Description of the graph-construction-time configuration of this
+  // Op.  That is to say, this describes the attr fields that will
+  // be specified in the NodeDef.
+  message AttrDef {
+    // A descriptive name for the argument.  May be used, e.g. by the
+    // Python client, as a keyword argument name, and so should match
+    // the regexp "[a-z][a-z0-9_]+".
+    string name = 1;
+
+    // One of the type names from attr_value.proto ("string", "list(string)",
+    // "int", etc.).
+    string type = 2;
+
+    // A reasonable default for this attribute if the user does not supply
+    // a value.  If not specified, the user must supply a value.
+    AttrValue default_value = 3;
+
+    // Human-readable description.
+    string description = 4;
+
+    // TODO(josh11b): bool is_optional?
+
+    // --- Constraints ---
+    // These constraints are only in effect if specified.  Default is no
+    // constraints.
+
+    // For type == "int", this is a minimum value.  For "list(___)"
+    // types, this is the minimum length.
+    bool has_minimum = 5;
+    int64 minimum = 6;
+
+    // The set of allowed values.  Has type that is the "list" version
+    // of the "type" field above (uses the "list" field of AttrValue).
+    // If type == "type" or "list(type)" above, then the "type" field
+    // of "allowed_values.list" has the set of allowed DataTypes.
+    // If type == "string" or "list(string)", then the "s" field of
+    // "allowed_values.list" has the set of allowed strings.
+    AttrValue allowed_values = 7;
+  }
+  repeated AttrDef attr = 4;
+
+  // Optional deprecation based on GraphDef versions.
+  OpDeprecation deprecation = 8;
+
+  // One-line human-readable description of what the Op does.
+  string summary = 5;
+
+  // Additional, longer human-readable description of what the Op does.
+  string description = 6;
+
+  // -------------------------------------------------------------------------
+  // Which optimizations this operation can participate in.
+
+  // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs)
+  bool is_commutative = 18;
+
+  // If is_aggregate is true, then this operation accepts N >= 2
+  // inputs and produces 1 output all of the same type.  Should be
+  // associative and commutative, and produce output with the same
+  // shape as the input.  The optimizer may replace an aggregate op
+  // taking input from multiple devices with a tree of aggregate ops
+  // that aggregate locally within each device (and possibly within
+  // groups of nearby devices) before communicating.
+  // TODO(josh11b): Implement that optimization.
+  bool is_aggregate = 16;  // for things like add
+
+  // Other optimizations go here, like
+  //   can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc.
+
+  // -------------------------------------------------------------------------
+  // Optimization constraints.
+
+  // Ops are marked as stateful if their behavior depends on some state beyond
+  // their input tensors (e.g. variable reading op) or if they have
+  // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops
+  // must always produce the same output for the same input and have
+  // no side-effects.
+  //
+  // By default Ops may be moved between devices.  Stateful ops should
+  // either not be moved, or should only be moved if that state can also
+  // be moved (e.g. via some sort of save / restore).
+  // Stateful ops are guaranteed to never be optimized away by Common
+  // Subexpression Elimination (CSE).
+  bool is_stateful = 17;  // for things like variables, queue
+
+  // -------------------------------------------------------------------------
+  // Non-standard options.
+
+  // By default, all inputs to an Op must be initialized Tensors.  Ops
+  // that may initialize tensors for the first time should set this
+  // field to true, to allow the Op to take an uninitialized Tensor as
+  // input.
+  bool allows_uninitialized_input = 19;  // for Assign, etc.
+
+  // Indicates whether the op implementation uses distributed communication.
+  // If True, the op is allowed to return errors for network disconnection and
+  // trigger TF network failure handling logics.
+  bool is_distributed_communication = 21;
+}
+// LINT.ThenChange(
+//     https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc)
+
+// Information about version-dependent deprecation of an op
+// (attached to an op via OpDef.deprecation).
+message OpDeprecation {
+  // First GraphDef version at which the op is disallowed.
+  int32 version = 1;
+
+  // Explanation of why it was deprecated and what to use instead.
+  string explanation = 2;
+}
+
+// A collection of OpDefs.
+message OpList {
+  // The ops in this collection.
+  repeated OpDef op = 1;
+}
diff --git a/src/main/proto/tensorflow/core/framework/resource_handle.proto b/src/main/proto/tensorflow/core/framework/resource_handle.proto
new file mode 100644
index 0000000..aa53f7f
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/resource_handle.proto
@@ -0,0 +1,47 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "ResourceHandle";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/resource_handle_go_proto";
+
+// Protocol buffer representing a handle to a tensorflow resource. Handles are
+// not valid across executions, but can be serialized back and forth from within
+// a single run.
+message ResourceHandleProto {
+  // Unique name for the device containing the resource.
+  string device = 1;
+
+  // Container in which this resource is placed.
+  string container = 2;
+
+  // Unique name of this resource.
+  string name = 3;
+
+  // Hash code for the type of the resource. Is only valid in the same device
+  // and in the same execution.
+  uint64 hash_code = 4;
+
+  // For debug-only, the name of the type pointed to by this handle, if
+  // available.
+  string maybe_type_name = 5;
+
+  // Protocol buffer representing a pair of (data type, tensor shape).
+  message DtypeAndShape {
+    // Data type of the tensor.
+    DataType dtype = 1;
+    // Shape of the tensor.
+    TensorShapeProto shape = 2;
+  }
+
+  // Data types and shapes for the underlying resource.
+  repeated DtypeAndShape dtypes_and_shapes = 6;
+
+  // Field number 7 must not be reused for new fields.
+  reserved 7;
+}
diff --git a/src/main/proto/tensorflow/core/framework/step_stats.proto b/src/main/proto/tensorflow/core/framework/step_stats.proto
new file mode 100644
index 0000000..762487f
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/step_stats.proto
@@ -0,0 +1,88 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/allocation_description.proto";
+import "tensorflow/core/framework/tensor_description.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "StepStatsProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/step_stats_go_proto";
+
+// An allocation/de-allocation operation performed by the allocator.
+message AllocationRecord {
+  // The timestamp of the operation, in microseconds.
+  int64 alloc_micros = 1;
+  // Number of bytes allocated, or de-allocated if negative.
+  int64 alloc_bytes = 2;
+}
+
+// Memory used by a single named allocator while executing one node, plus a
+// snapshot of the allocator's overall usage.
+message AllocatorMemoryUsed {
+  string allocator_name = 1;
+  // These are per-node allocator memory stats.
+  int64 total_bytes = 2;
+  int64 peak_bytes = 3;
+  // The bytes that are not deallocated.
+  int64 live_bytes = 4;
+  // The allocation and deallocation timeline.
+  repeated AllocationRecord allocation_records = 6;
+
+  // These are snapshots of the overall allocator memory stats.
+  // The number of live bytes currently allocated by the allocator.
+  int64 allocator_bytes_in_use = 5;
+}
+
+// Output sizes recorded for a single execution of a graph node.
+message NodeOutput {
+  // Output slot (index) of the node that produced this tensor.
+  int32 slot = 1;
+  // Type, shape, and allocation info of the produced tensor.
+  TensorDescription tensor_description = 3;
+}
+
+// Memory usage recorded for a single execution of a graph node.
+message MemoryStats {
+  int64 temp_memory_size = 1;
+  int64 persistent_memory_size = 3;
+  repeated int64 persistent_tensor_alloc_ids = 5;
+
+  // The device_* fields below are deprecated; retained only for backward
+  // compatibility with older producers.
+  int64 device_temp_memory_size = 2 [deprecated = true];
+  int64 device_persistent_memory_size = 4 [deprecated = true];
+  repeated int64 device_persistent_tensor_alloc_ids = 6 [deprecated = true];
+}
+
+// Time/size stats recorded for a single execution of a graph node.
+message NodeExecStats {
+  // TODO(tucker): Use some more compact form of node identity than
+  // the full string name.  Either all processes should agree on a
+  // global id (cost_id?) for each node, or we should use a hash of
+  // the name.
+  string node_name = 1;
+  // NOTE(review): the *_rel_* fields below appear to be offsets relative to
+  // all_start_micros / all_start_nanos -- confirm with the producer.
+  int64 all_start_micros = 2;
+  int64 op_start_rel_micros = 3;
+  int64 op_end_rel_micros = 4;
+  int64 all_end_rel_micros = 5;
+  repeated AllocatorMemoryUsed memory = 6;
+  repeated NodeOutput output = 7;
+  string timeline_label = 8;
+  int64 scheduled_micros = 9;
+  uint32 thread_id = 10;
+  repeated AllocationDescription referenced_tensor = 11;
+  MemoryStats memory_stats = 12;
+  int64 all_start_nanos = 13;
+  int64 op_start_rel_nanos = 14;
+  int64 op_end_rel_nanos = 15;
+  int64 all_end_rel_nanos = 16;
+  int64 scheduled_nanos = 17;
+}
+
+// Execution stats for all nodes that ran on a single device during a step.
+message DeviceStepStats {
+  string device = 1;
+  repeated NodeExecStats node_stats = 2;
+  // Maps a thread id (see NodeExecStats.thread_id) to a thread name.
+  map<uint32, string> thread_names = 3;
+}
+
+// Collected stats for every device that participated in a single step.
+message StepStats {
+  repeated DeviceStepStats dev_stats = 1;
+}
diff --git a/src/main/proto/tensorflow/core/framework/tensor.proto b/src/main/proto/tensorflow/core/framework/tensor.proto
new file mode 100644
index 0000000..9f5a29a
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/tensor.proto
@@ -0,0 +1,101 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/resource_handle.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "TensorProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_go_proto";
+
+// Protocol buffer representing a tensor.
+message TensorProto {
+  // Data type of the tensor.
+  DataType dtype = 1;
+
+  // Shape of the tensor.  TODO(touts): sort out the 0-rank issues.
+  TensorShapeProto tensor_shape = 2;
+
+  // Only one of the representations below is set, one of "tensor_content" and
+  // the "xxx_val" attributes.  We are not using oneof because as oneofs cannot
+  // contain repeated fields it would require another extra set of messages.
+
+  // Version number.
+  //
+  // In version 0, if the "repeated xxx" representations contain only one
+  // element, that element is repeated to fill the shape.  This makes it easy
+  // to represent a constant Tensor with a single value.
+  int32 version_number = 3;
+
+  // Serialized raw tensor content from either Tensor::AsProtoTensorContent or
+  // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation
+  // can be used for all tensor types. The purpose of this representation is to
+  // reduce serialization overhead during RPC call by avoiding serialization of
+  // many repeated small items.
+  bytes tensor_content = 4;
+
+  // Type specific representations that make it easy to create tensor protos in
+  // all languages.  Only the representation corresponding to "dtype" can
+  // be set.  The values hold the flattened representation of the tensor in
+  // row major order.
+
+  // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll
+  // have some pointless zero padding for each value here.
+  repeated int32 half_val = 13 [packed = true];
+
+  // DT_FLOAT.
+  repeated float float_val = 5 [packed = true];
+
+  // DT_DOUBLE.
+  repeated double double_val = 6 [packed = true];
+
+  // DT_INT32, DT_INT16, DT_UINT16, DT_INT8, DT_UINT8.
+  repeated int32 int_val = 7 [packed = true];
+
+  // DT_STRING
+  repeated bytes string_val = 8;
+
+  // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real
+  // and imaginary parts of i-th single precision complex.
+  repeated float scomplex_val = 9 [packed = true];
+
+  // DT_INT64
+  repeated int64 int64_val = 10 [packed = true];
+
+  // DT_BOOL
+  repeated bool bool_val = 11 [packed = true];
+
+  // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real
+  // and imaginary parts of i-th double precision complex.
+  repeated double dcomplex_val = 12 [packed = true];
+
+  // DT_RESOURCE
+  repeated ResourceHandleProto resource_handle_val = 14;
+
+  // DT_VARIANT
+  repeated VariantTensorDataProto variant_val = 15;
+
+  // DT_UINT32
+  repeated uint32 uint32_val = 16 [packed = true];
+
+  // DT_UINT64
+  repeated uint64 uint64_val = 17 [packed = true];
+
+  // DT_FLOAT8_*, use variable-sized set of bytes
+  // (i.e. the equivalent of repeated uint8, if such a thing existed).
+  bytes float8_val = 18;
+}
+
+// Protocol buffer representing the serialization format of DT_VARIANT tensors
+// (carried in TensorProto.variant_val).
+message VariantTensorDataProto {
+  // Name of the type of objects being serialized.
+  string type_name = 1;
+  // Portions of the object that are not Tensors.
+  bytes metadata = 2;
+  // Tensors contained within objects being serialized.
+  repeated TensorProto tensors = 3;
+}
diff --git a/src/main/proto/tensorflow/core/framework/tensor_description.proto b/src/main/proto/tensorflow/core/framework/tensor_description.proto
new file mode 100644
index 0000000..8462c63
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/tensor_description.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/allocation_description.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "TensorDescriptionProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_description_go_proto";
+
+// Metadata about a single tensor: element type, shape, and how it was
+// allocated.
+message TensorDescription {
+  // Data type of tensor elements.
+  DataType dtype = 1;
+
+  // Shape of the tensor.
+  TensorShapeProto shape = 2;
+
+  // Information about the size and allocator used for the data.
+  AllocationDescription allocation_description = 4;
+}
diff --git a/src/main/proto/tensorflow/core/framework/tensor_shape.proto b/src/main/proto/tensorflow/core/framework/tensor_shape.proto
new file mode 100644
index 0000000..45d5b78
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/tensor_shape.proto
@@ -0,0 +1,46 @@
+// Protocol buffer representing the shape of tensors.
+
+syntax = "proto3";
+option cc_enable_arenas = true;
+option java_outer_classname = "TensorShapeProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/tensor_shape_go_proto";
+
+package tensorflow;
+
+// Dimensions of a tensor.
+message TensorShapeProto {
+  // One dimension of the tensor.
+  message Dim {
+    // Size of the tensor in that dimension.
+    // This value must be >= -1, but values of -1 are reserved for "unknown"
+    // shapes (values of -1 mean "unknown" dimension).  Certain wrappers
+    // that work with TensorShapeProto may fail at runtime when deserializing
+    // a TensorShapeProto containing a dim value of -1.
+    int64 size = 1;
+
+    // Optional name of the tensor dimension.
+    string name = 2;
+  };
+
+  // Dimensions of the tensor, such as {"input", 30}, {"output", 40}
+  // for a 30 x 40 2D tensor.  If an entry has size -1, this
+  // corresponds to a dimension of unknown size. The names are
+  // optional.
+  //
+  // The order of entries in "dim" matters: It indicates the layout of the
+  // values in the tensor in-memory representation.
+  //
+  // The first entry in "dim" is the outermost dimension used to layout the
+  // values, the last entry is the innermost dimension.  This matches the
+  // in-memory layout of RowMajor Eigen tensors.
+  //
+  // If "dim.size()" > 0, "unknown_rank" must be false.
+  //
+  // A fully-known scalar (rank 0) is therefore represented by an empty "dim"
+  // with "unknown_rank" == false.
+  repeated Dim dim = 2;
+
+  // If true, the number of dimensions in the shape is unknown.
+  //
+  // If true, "dim.size()" must be 0.
+  bool unknown_rank = 3;
+};
diff --git a/src/main/proto/tensorflow/core/framework/types.proto b/src/main/proto/tensorflow/core/framework/types.proto
new file mode 100644
index 0000000..d0a9738
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/types.proto
@@ -0,0 +1,100 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "TypesProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/types_go_proto";
+
+// (== suppress_warning documentation-presence ==)
+// LINT.IfChange
+enum DataType {
+  // Not a legal value for DataType.  Used to indicate a DataType field
+  // has not been set.
+  DT_INVALID = 0;
+
+  // Data types that all computation devices are expected to be
+  // capable to support.
+  DT_FLOAT = 1;
+  DT_DOUBLE = 2;
+  DT_INT32 = 3;
+  DT_UINT8 = 4;
+  DT_INT16 = 5;
+  DT_INT8 = 6;
+  DT_STRING = 7;
+  DT_COMPLEX64 = 8;  // Single-precision complex
+  DT_INT64 = 9;
+  DT_BOOL = 10;
+  DT_QINT8 = 11;     // Quantized int8
+  DT_QUINT8 = 12;    // Quantized uint8
+  DT_QINT32 = 13;    // Quantized int32
+  DT_BFLOAT16 = 14;  // Float32 truncated to 16 bits.
+  DT_QINT16 = 15;    // Quantized int16
+  DT_QUINT16 = 16;   // Quantized uint16
+  DT_UINT16 = 17;
+  DT_COMPLEX128 = 18;  // Double-precision complex
+  DT_HALF = 19;
+  DT_RESOURCE = 20;
+  DT_VARIANT = 21;  // Arbitrary C++ data types
+  DT_UINT32 = 22;
+  DT_UINT64 = 23;
+  DT_FLOAT8_E5M2 = 24;    // 5 exponent bits, 2 mantissa bits.
+  DT_FLOAT8_E4M3FN = 25;  // 4 exponent bits, 3 mantissa bits, finite-only, with
+                          // 2 NaNs (0bS1111111).
+  // TODO - b/299182407: Leaving room for remaining float8 types.
+  // DT_FLOAT8_E4M3FNUZ = 26;
+  // DT_FLOAT8_E4M3B11FNUZ = 27;
+  // DT_FLOAT8_E5M2FNUZ = 28;
+  DT_INT4 = 29;
+  DT_UINT4 = 30;
+
+  // Do not use!  These are only for TF1's obsolete reference Variables.
+  // Every enum above should have a corresponding value below (verified by
+  // types_test).
+  // Each DT_*_REF value is its non-ref counterpart plus 100.
+  DT_FLOAT_REF = 101;
+  DT_DOUBLE_REF = 102;
+  DT_INT32_REF = 103;
+  DT_UINT8_REF = 104;
+  DT_INT16_REF = 105;
+  DT_INT8_REF = 106;
+  DT_STRING_REF = 107;
+  DT_COMPLEX64_REF = 108;
+  DT_INT64_REF = 109;
+  DT_BOOL_REF = 110;
+  DT_QINT8_REF = 111;
+  DT_QUINT8_REF = 112;
+  DT_QINT32_REF = 113;
+  DT_BFLOAT16_REF = 114;
+  DT_QINT16_REF = 115;
+  DT_QUINT16_REF = 116;
+  DT_UINT16_REF = 117;
+  DT_COMPLEX128_REF = 118;
+  DT_HALF_REF = 119;
+  DT_RESOURCE_REF = 120;
+  DT_VARIANT_REF = 121;
+  DT_UINT32_REF = 122;
+  DT_UINT64_REF = 123;
+  DT_FLOAT8_E5M2_REF = 124;
+  DT_FLOAT8_E4M3FN_REF = 125;
+  // TODO - b/299182407: Leaving room for remaining float8 types.
+  // DT_FLOAT8_E4M3FNUZ_REF = 126;
+  // DT_FLOAT8_E4M3B11FNUZ_REF = 127;
+  // DT_FLOAT8_E5M2FNUZ_REF = 128;
+  DT_INT4_REF = 129;
+  DT_UINT4_REF = 130;
+}
+// LINT.ThenChange(
+//    https://www.tensorflow.org/code/tensorflow/c/tf_datatype.h,
+//    https://www.tensorflow.org/code/tensorflow/go/tensor.go,
+//    https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc,
+//    https://www.tensorflow.org/code/tensorflow/core/framework/types.h,
+//    https://www.tensorflow.org/code/tensorflow/core/framework/types.cc,
+//    https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py,
+//    https://www.tensorflow.org/code/tensorflow/python/framework/function.py)
+
+// Represents a serialized tf.dtypes.DType.
+message SerializedDType {
+  DataType datatype = 1;
+}
diff --git a/src/main/proto/tensorflow/core/framework/variable.proto b/src/main/proto/tensorflow/core/framework/variable.proto
new file mode 100644
index 0000000..09d7fb3
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/variable.proto
@@ -0,0 +1,84 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "VariableProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/variable_go_proto";
+
+// Indicates when a distributed variable will be synced.
+enum VariableSynchronization {
+  // `AUTO`: Indicates that the synchronization will be determined by the
+  // current `DistributionStrategy` (e.g. with `MirroredStrategy` this would be
+  // `ON_WRITE`).
+  VARIABLE_SYNCHRONIZATION_AUTO = 0;
+  // `NONE`: Indicates that there will only be one copy of the variable, so
+  // there is no need to sync.
+  VARIABLE_SYNCHRONIZATION_NONE = 1;
+  // `ON_WRITE`: Indicates that the variable will be updated across devices
+  // every time it is written.
+  VARIABLE_SYNCHRONIZATION_ON_WRITE = 2;
+  // `ON_READ`: Indicates that the variable will be aggregated across devices
+  // when it is read (e.g. when checkpointing or when evaluating an op that uses
+  // the variable).
+  VARIABLE_SYNCHRONIZATION_ON_READ = 3;
+}
+
+// Indicates how a distributed variable will be aggregated
+// (used by VariableDef.aggregation).
+enum VariableAggregation {
+  // `NONE`: This is the default, giving an error if you use a
+  // variable-update operation with multiple replicas.
+  VARIABLE_AGGREGATION_NONE = 0;
+  // `SUM`: Add the updates across replicas.
+  VARIABLE_AGGREGATION_SUM = 1;
+  // `MEAN`: Take the arithmetic mean ("average") of the updates across
+  // replicas.
+  VARIABLE_AGGREGATION_MEAN = 2;
+  // `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same
+  // update, but we only want to perform the update once. Used, e.g., for the
+  // global step counter.
+  VARIABLE_AGGREGATION_ONLY_FIRST_REPLICA = 3;
+}
+
+// Protocol buffer representing a Variable.
+// NOTE(review): the *_name fields appear to reference nodes/tensors in the
+// owning graph -- confirm against the producer of this proto.
+message VariableDef {
+  // Name of the variable tensor.
+  string variable_name = 1;
+
+  // Name of the tensor holding the variable's initial value.
+  string initial_value_name = 6;
+
+  // Name of the initializer op.
+  string initializer_name = 2;
+
+  // Name of the snapshot tensor.
+  string snapshot_name = 3;
+
+  // Support for saving variables as slices of a larger variable.
+  SaveSliceInfoDef save_slice_info_def = 4;
+
+  // Whether to represent this as a ResourceVariable.
+  bool is_resource = 5;
+
+  // Whether this variable should be trained.
+  bool trainable = 7;
+
+  // Indicates when a distributed variable will be synced.
+  VariableSynchronization synchronization = 8;
+
+  // Indicates how a distributed variable will be aggregated.
+  VariableAggregation aggregation = 9;
+}
+
+// Describes a variable stored as a slice of a larger variable
+// (see VariableDef.save_slice_info_def).
+message SaveSliceInfoDef {
+  // Name of the full variable of which this is a slice.
+  string full_name = 1;
+  // Shape of the full variable.
+  repeated int64 full_shape = 2;
+  // Offset of this variable into the full variable.
+  repeated int64 var_offset = 3;
+  // Shape of this variable.
+  repeated int64 var_shape = 4;
+}
diff --git a/src/main/proto/tensorflow/core/framework/versions.proto b/src/main/proto/tensorflow/core/framework/versions.proto
new file mode 100644
index 0000000..2cca6e3
--- /dev/null
+++ b/src/main/proto/tensorflow/core/framework/versions.proto
@@ -0,0 +1,33 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "VersionsProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/versions_go_proto";
+
+// Version information for a piece of serialized data
+//
+// There are different types of versions for each type of data
+// (GraphDef, etc.), but they all have the same common shape
+// described here.
+//
+// Each consumer has "consumer" and "min_producer" versions (specified
+// elsewhere).  A consumer is allowed to consume this data if
+//
+//   producer >= min_producer
+//   consumer >= min_consumer
+//   consumer not in bad_consumers
+//
+// See the compatibility rules in the file-level comment above:
+// a consumer may read this data iff producer >= min_producer,
+// consumer >= min_consumer, and consumer is not in bad_consumers.
+message VersionDef {
+  // The version of the code that produced this data.
+  int32 producer = 1;
+
+  // Any consumer below this version is not allowed to consume this data.
+  int32 min_consumer = 2;
+
+  // Specific consumer versions which are disallowed (e.g. due to bugs).
+  repeated int32 bad_consumers = 3;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/cluster.proto b/src/main/proto/tensorflow/core/protobuf/cluster.proto
new file mode 100644
index 0000000..4065eb4
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/cluster.proto
@@ -0,0 +1,87 @@
+/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "ClusterProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.distruntime";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// This file contains protos to be used when defining a TensorFlow
+// cluster.
+//
+// EXAMPLES
+// --------
+//
+// 1. A single-process cluster, containing "/job:local/task:0".
+//
+//    Cluster:
+//      job { name: 'local' tasks { key: 0 value: 'localhost:2222' } }
+//
+//    Server:
+//      cluster { $CLUSTER } job_name: 'local' task_index: 0
+//
+// 2. A two-process cluster, containing "/job:local/task:{0,1}".
+//
+//    Cluster:
+//      job { name: 'local' tasks { key: 0 value: 'localhost:2222' }
+//                          tasks { key: 1 value: 'localhost:2223' } }
+//
+//    Servers:
+//      cluster { $CLUSTER } job_name: 'local' task_index: 0
+//      cluster { $CLUSTER } job_name: 'local' task_index: 1
+//
+// 3. A two-job cluster, containing "/job:worker/task:{0,1,2}" and
+//    "/job:ps/task:{0,1}".
+//
+//    Cluster:
+//      job { name: 'worker' tasks { key: 0 value: 'worker1:2222' }
+//                           tasks { key: 1 value: 'worker2:2222' }
+//                           tasks { key: 2 value: 'worker3:2222' } }
+//      job { name: 'ps'     tasks { key: 0 value: 'ps0:2222' }
+//                           tasks { key: 1 value: 'ps1:2222' } }
+//
+//    Servers:
+//      cluster { $CLUSTER } job_name: 'worker' task_index: 0
+//      cluster { $CLUSTER } job_name: 'worker' task_index: 1
+//      cluster { $CLUSTER } job_name: 'worker' task_index: 2
+//      cluster { $CLUSTER } job_name: 'ps'     task_index: 0
+//      cluster { $CLUSTER } job_name: 'ps'     task_index: 1
+
+// Defines a single job in a TensorFlow cluster.
+message JobDef {
+  // The name of this job.
+  //
+  // Used as the "/job:<name>" component of device names assigned to the
+  // tasks below.
+  string name = 1;
+
+  // Mapping from task ID to "hostname:port" string.
+  //
+  // If the `name` field contains "worker", and the `tasks` map contains a
+  // mapping from 7 to "example.org:2222", then the device prefix
+  // "/job:worker/task:7" will be assigned to "example.org:2222".
+  //
+  // If a job has multiple replicas, host-ports will be comma-delimited, with
+  // one entry for each replica.
+  map<int32, string> tasks = 2;
+}
+
+// Defines a TensorFlow cluster as a set of jobs.
+// See the EXAMPLES comment near the top of this file for sample
+// cluster configurations.
+message ClusterDef {
+  // The jobs that comprise the cluster.
+  repeated JobDef job = 1;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/config.proto b/src/main/proto/tensorflow/core/protobuf/config.proto
new file mode 100644
index 0000000..8e6b814
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/config.proto
@@ -0,0 +1,1015 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "xla/tsl/protobuf/coordination_config.proto";
+import "tensorflow/core/framework/cost_graph.proto";
+import "tensorflow/core/framework/graph.proto";
+import "tensorflow/core/framework/step_stats.proto";
+import "tensorflow/core/protobuf/cluster.proto";
+import "tensorflow/core/protobuf/debug.proto";
+import "tensorflow/core/protobuf/rewriter_config.proto";
+import "tensorflow/core/protobuf/rpc_options.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "ConfigProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// Per-process GPU configuration options; see ConfigProto.gpu_options.
+message GPUOptions {
+  // Fraction of the total GPU memory to allocate for each process.
+  // 1 means to allocate all of the GPU memory, 0.5 means the process
+  // allocates up to ~50% of the total GPU memory.
+  //
+  // GPU memory is pre-allocated unless the allow_growth option is enabled.
+  //
+  // If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
+  // the amount of memory available on the GPU device by using host memory as a
+  // swap space. Accessing memory not available on the device will be
+  // significantly slower as that would require memory transfer between the host
+  // and the device. Options to reduce the memory requirement should be
+  // considered before enabling this option as this may come with a negative
+  // performance impact. Oversubscription using the unified memory requires
+  // Pascal class or newer GPUs and it is currently only supported on the Linux
+  // operating system. See
+  // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
+  // for the detailed requirements.
+  double per_process_gpu_memory_fraction = 1;
+
+  // If true, the allocator does not pre-allocate the entire specified
+  // GPU memory region, instead starting small and growing as needed.
+  bool allow_growth = 4;
+
+  // The type of GPU allocation strategy to use.
+  //
+  // Allowed values:
+  // "": The empty string (default) uses a system-chosen default
+  //     which may change over time.
+  //
+  // "BFC": A "Best-fit with coalescing" algorithm, simplified from a
+  //        version of dlmalloc.
+  string allocator_type = 2;
+
+  // Delay deletion of up to this many bytes to reduce the number of
+  // interactions with gpu driver code.  If 0, the system chooses
+  // a reasonable default (several MBs).
+  int64 deferred_deletion_bytes = 3;
+
+  // A comma-separated list of GPU ids that determines the 'visible'
+  // to 'virtual' mapping of GPU devices.  For example, if TensorFlow
+  // can see 8 GPU devices in the process, and one wanted to map
+  // visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
+  // then one would specify this field as "5,3".  This field is similar in
+  // spirit to the CUDA_VISIBLE_DEVICES environment variable, except
+  // it applies to the visible GPU devices in the process.
+  //
+  // NOTE:
+  // 1. The GPU driver provides the process with the visible GPUs
+  //    in an order which is not guaranteed to have any correlation to
+  //    the *physical* GPU id in the machine.  This field is used for
+  //    remapping "visible" to "virtual", which means this operates only
+  //    after the process starts.  Users are required to use vendor
+  //    specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
+  //    physical to visible device mapping prior to invoking TensorFlow.
+  // 2. In the code, the ids in this list are also called "platform GPU id"s,
+  //    and the 'virtual' ids of GPU devices (i.e. the ids in the device
+  //    name "/device:GPU:<id>") are also called "TF GPU id"s. Please
+  //    refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
+  //    for more information.
+  // 3. The visible_device_list is also used for PluggableDevice. And
+  //    different types of PluggableDevices share this field. In that case,
+  //    the pluggable_device_type is used to distinguish them, making the
+  //    visible_device_list a list of <pluggable_device_type>:<device_index>,
+  //    e.g. "PluggableDeviceA:0,PluggableDeviceA:1,PluggableDeviceB:0".
+  string visible_device_list = 5;
+
+  // In the event polling loop sleep this many microseconds between
+  // PollEvents calls, when the queue is not empty.  If value is not
+  // set or set to 0, gets set to a non-zero default.
+  int32 polling_active_delay_usecs = 6;
+
+  // This field is deprecated and ignored.
+  int32 polling_inactive_delay_msecs = 7;
+
+  // Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
+  // enabling this option forces all CPU tensors to be allocated with Cuda
+  // pinned memory. Normally, TensorFlow will infer which tensors should be
+  // allocated as the pinned memory. But in case where the inference is
+  // incomplete, this option can significantly speed up the cross-device memory
+  // copy performance as long as it fits the memory.
+  // Note that this option is not something that should be
+  // enabled by default for unknown or very large models, since all Cuda pinned
+  // memory is unpageable, having too much pinned memory might negatively impact
+  // the overall host system performance.
+  bool force_gpu_compatible = 8;
+
+  message Experimental {
+    // Configuration for breaking down a visible GPU into multiple "virtual"
+    // devices.
+    message VirtualDevices {
+      // Per "virtual" device memory limit, in MB. The number of elements in
+      // the list is the number of virtual devices to create on the
+      // corresponding visible GPU (see "virtual_devices" below).
+      // If empty and `num_virtual_devices_per_gpu` is not set, it will create
+      // single virtual device taking all available memory from the device.
+      //
+      // For the concept of "visible" and "virtual" GPU, see the comments for
+      // "visible_device_list" above for more information.
+      repeated float memory_limit_mb = 1;
+
+      // Priority values to use with the virtual devices. Use the cuda function
+      // cudaDeviceGetStreamPriorityRange to query for valid range of values for
+      // priority.
+      //
+      // On a P4000 GPU with cuda 10.1, the priority range reported was 0 for
+      // least priority and -1 for greatest priority.
+      //
+      // If this field is not specified, then the virtual devices will be
+      // created with the default. If this field has values set, then the size
+      // of this must match with the above memory_limit_mb.
+      repeated int32 priority = 2;
+
+      // Virtual Device ordinal number determines the device ID of the device.
+      // A Virtual device with a lower ordinal number always receives a
+      // smaller device id. The physical device id and location in the
+      // virtual device list is used to break ties.
+      repeated int32 device_ordinal = 3;
+    }
+
+    // The multi virtual device settings. If empty (not set), it will create
+    // single virtual device on each visible GPU, according to the settings
+    // in "visible_device_list" above. Otherwise, the number of elements in the
+    // list must be the same as the number of visible GPUs (after
+    // "visible_device_list" filtering if it is set), and the string represented
+    // device names (e.g. /device:GPU:<id>) will refer to the virtual
+    // devices and have the <id> field assigned sequentially starting from 0,
+    // according to the order of the virtual devices determined by
+    // device_ordinal and the location in the virtual device list.
+    //
+    // For example,
+    //   visible_device_list = "1,0"
+    //   virtual_devices { memory_limit: 1GB memory_limit: 2GB }
+    //   virtual_devices { memory_limit: 3GB memory_limit: 4GB }
+    // will create 4 virtual devices as:
+    //   /device:GPU:0 -> visible GPU 1 with 1GB memory
+    //   /device:GPU:1 -> visible GPU 1 with 2GB memory
+    //   /device:GPU:2 -> visible GPU 0 with 3GB memory
+    //   /device:GPU:3 -> visible GPU 0 with 4GB memory
+    //
+    // but
+    //   visible_device_list = "1,0"
+    //   virtual_devices { memory_limit: 1GB memory_limit: 2GB
+    //                     device_ordinal: 10 device_ordinal: 20}
+    //   virtual_devices { memory_limit: 3GB memory_limit: 4GB
+    //                     device_ordinal: 10 device_ordinal: 20}
+    // will create 4 virtual devices as:
+    //   /device:GPU:0 -> visible GPU 1 with 1GB memory  (ordinal 10)
+    //   /device:GPU:1 -> visible GPU 0 with 3GB memory  (ordinal 10)
+    //   /device:GPU:2 -> visible GPU 1 with 2GB memory  (ordinal 20)
+    //   /device:GPU:3 -> visible GPU 0 with 4GB memory  (ordinal 20)
+    //
+    // NOTE:
+    // 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
+    //    at the same time.
+    // 2. Currently this setting is per-process, not per-session. Using
+    //    different settings in different sessions within same process will
+    //    result in undefined behavior.
+    repeated VirtualDevices virtual_devices = 1;
+
+    // The number of virtual devices to create on each visible GPU. The
+    // available memory will be split equally among all virtual devices. If the
+    // field `memory_limit_mb` in `VirtualDevices` is not empty, this field will
+    // be ignored.
+    int32 num_virtual_devices_per_gpu = 15;
+
+    // If true, uses CUDA unified memory for memory allocations. If
+    // per_process_gpu_memory_fraction option is greater than 1.0, then unified
+    // memory is used regardless of the value for this field. See comments for
+    // per_process_gpu_memory_fraction field for more details and requirements
+    // of the unified memory. This option is useful to oversubscribe memory if
+    // multiple processes are sharing a single GPU while individually using less
+    // than 1.0 per process memory fraction.
+    bool use_unified_memory = 2;
+
+    // If > 1, the number of device-to-device copy streams to create
+    // for each GPUDevice.  Default value is 0, which is automatically
+    // converted to 1.
+    int32 num_dev_to_dev_copy_streams = 3;
+
+    // If non-empty, defines a good GPU ring order on a single worker based on
+    // device interconnect.  This assumes that all workers have the same GPU
+    // topology.  Specify as a comma-separated string, e.g. "3,2,1,0,7,6,5,4".
+    // This ring order is used by the RingReducer implementation of
+    // CollectiveReduce, and serves as an override to automatic ring order
+    // generation in OrderTaskDeviceMap() during CollectiveParam resolution.
+    string collective_ring_order = 4;
+
+    // If true then extra work is done by GPUDevice and GPUBFCAllocator to
+    // keep track of when GPU memory is freed and when kernels actually
+    // complete so that we can know when a nominally free memory chunk
+    // is really not subject to pending use.
+    bool timestamped_allocator = 5;
+
+    // reserved id: 6
+
+    // Parameters for GPUKernelTracker.  By default no kernel tracking is done.
+    // Note that timestamped_allocator is only effective if some tracking is
+    // specified.
+    //
+    // If kernel_tracker_max_interval = n > 0, then a tracking event
+    // is inserted after every n kernels without an event.
+    int32 kernel_tracker_max_interval = 7;
+    // If kernel_tracker_max_bytes = n > 0, then a tracking event is
+    // inserted after every series of kernels allocating a sum of
+    // memory >= n.  If one kernel allocates b * n bytes, then one
+    // event will be inserted after it, but it will count as b against
+    // the pending limit.
+    int32 kernel_tracker_max_bytes = 8;
+    // If kernel_tracker_max_pending > 0 then no more than this many
+    // tracking events can be outstanding at a time.  An attempt to
+    // launch an additional kernel will stall until an event
+    // completes.
+    int32 kernel_tracker_max_pending = 9;
+
+    // BFC Allocator can return an allocated chunk of memory up to 2x the
+    // requested size. For virtual devices with tight memory constraints, and
+    // proportionately large allocation requests, this can lead to a significant
+    // reduction in available memory. The threshold below controls when a chunk
+    // should be split if the chunk size exceeds requested memory size. It is
+    // expressed as a fraction of total available memory for the tf device. For
+    // example setting it to 0.05 would imply a chunk needs to be split if its
+    // size exceeds the requested memory by 5% of the total virtual device/gpu
+    // memory size.
+    double internal_fragmentation_fraction = 10;
+
+    // When true, use CUDA cudaMallocAsync API instead of TF gpu allocator.
+    bool use_cuda_malloc_async = 11;
+
+    // By default, BFCAllocator may sleep when it runs out of memory, in the
+    // hopes that another thread will free up memory in the meantime.  Setting
+    // this to true disables the sleep; instead we'll OOM immediately.
+    bool disallow_retry_on_allocation_failure = 12;
+
+    // Memory limit for "GPU host allocator", aka pinned memory allocator.  This
+    // can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
+    float gpu_host_mem_limit_in_mb = 13;
+
+    // If true, then the host allocator allocates its max memory all upfront and
+    // never grows.  This can be useful for latency-sensitive systems, because
+    // growing the GPU host memory pool can be expensive.
+    //
+    // You probably only want to use this in combination with
+    // gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
+    // quite high.
+    bool gpu_host_mem_disallow_growth = 14;
+
+    // Memory limit for gpu system. This can also be set by
+    // TF_DEVICE_MIN_SYS_MEMORY_IN_MB, which takes precedence over
+    // gpu_system_memory_size_in_mb. With this, user can configure the gpu
+    // system memory size for better resource estimation of multi-tenancy(one
+    // gpu with multiple model) use case.
+    int32 gpu_system_memory_size_in_mb = 16;
+
+    // If true, save information needed for created a PjRt GPU client for
+    // creating a client with remote devices.
+    bool populate_pjrt_gpu_client_creation_info = 17;
+
+    // node_id for use when creating a PjRt GPU client with remote devices,
+    // which enumerates jobs*tasks from a ServerDef.
+    int32 node_id = 18;
+
+    // Whether to merge data transfer streams into the compute stream in the
+    // same stream group. Stream merging helps reduce the overhead caused by
+    // stream synchronization, especially when data transfers are frequent. For
+    // example, setting "merge_host_to_device_stream = true" will make the
+    // compute stream responsible for both computation and host to device memory
+    // copy.
+    message StreamMergeOptions {
+      // If true, the compute stream will be used for host_to_device copy as
+      // well. It's no longer necessary to record an event before the copy to
+      // let the copy stream wait for the compute stream to finish. There is
+      // also no need to wait for the copy to complete before executing the
+      // callback function.
+      bool merge_host_to_device_stream = 1;
+
+      // If true, the compute stream will be used for device_to_host copy as
+      // well. It's no longer necessary to record an event before the copy to
+      // let the copy stream wait for the compute stream to finish.
+      bool merge_device_to_host_stream = 2;
+
+      // If true, the compute stream will be used for device_to_device copy as
+      // well. It's no longer necessary to record an event before the copy to
+      // let the copy stream wait for the compute stream of the sending device
+      // to finish. There is also no need to wait for the compute stream of the
+      // receiving device to finish if the copy is within the same device.
+      bool merge_device_to_device_stream = 3;
+    }
+
+    StreamMergeOptions stream_merge_options = 19;
+  }
+
+  // Everything inside experimental is subject to change and is not subject
+  // to API stability guarantees in
+  // https://www.tensorflow.org/guide/version_compat.
+  Experimental experimental = 9;
+}
+
+// Options passed to the graph optimizer
+message OptimizerOptions {
+  // If true, optimize the graph using common subexpression elimination.
+  // Note: the optimization Level L1 will override this setting to true. So in
+  // order to disable common subexpression elimination the opt_level has to be
+  // set to L0.
+  bool do_common_subexpression_elimination = 1;
+
+  // If true, perform constant folding optimization on the graph.
+  // Note: the optimization Level L1 will override this setting to true. So in
+  // order to disable constant folding the opt_level has to be set to L0.
+  bool do_constant_folding = 2;
+
+  // Constant folding optimization replaces tensors whose values can be
+  // predetermined, with constant nodes. To avoid inserting too large constants,
+  // the size of each constant created can be limited. If this value is zero, a
+  // default limit of 10 MiB will be applied. If constant folding optimization
+  // is disabled, this value is ignored.
+  int64 max_folded_constant_in_bytes = 6;
+
+  // If true, perform function inlining on the graph.
+  bool do_function_inlining = 4;
+
+  // Optimization level
+  enum Level {
+    // L1 is the default level.
+    // Optimization performed at L1 :
+    // 1. Common subexpression elimination
+    // 2. Constant folding
+    L1 = 0;
+
+    // No optimizations
+    // (Uses a negative value so that the proto3 zero default selects L1.)
+    L0 = -1;
+  }
+
+  // Overall optimization level. The actual optimizations applied will be the
+  // logical OR of the flags that this level implies and any flags already set.
+  Level opt_level = 3;
+
+  // Control the use of the compiler/jit.  Experimental.
+  enum GlobalJitLevel {
+    DEFAULT = 0;  // Default setting ("off" now, but later expected to be "on")
+    // (Uses a negative value so that the proto3 zero default selects DEFAULT.)
+    OFF = -1;
+    // The following settings turn on compilation, with higher values being
+    // more aggressive.  Higher values may reduce opportunities for parallelism
+    // and may use more memory.  (At present, there is no distinction, but this
+    // is expected to change.)
+    ON_1 = 1;
+    ON_2 = 2;
+  }
+  GlobalJitLevel global_jit_level = 5;
+
+  // CPU code will be autoclustered only if global_jit_level >= ON_1 and either:
+  //  - this flag is true, or
+  //  - TF_XLA_FLAGS contains --tf_xla_cpu_global_jit=true.
+  bool cpu_global_jit = 7;
+}
+
+// Options that apply to all graphs in a session; see
+// ConfigProto.graph_options.
+message GraphOptions {
+  // Removed, use optimizer_options below.
+  reserved "skip_common_subexpression_elimination";
+  reserved 1;
+
+  // If true, use control flow to schedule the activation of Recv nodes.
+  // (Currently ignored.)
+  bool enable_recv_scheduling = 2;
+
+  // Options controlling how graph is optimized.
+  OptimizerOptions optimizer_options = 3;
+
+  // The number of steps to run before returning a cost model detailing
+  // the memory usage and performance of each node of the graph. 0 means
+  // no cost model.
+  int64 build_cost_model = 4;
+
+  // The number of steps to skip before collecting statistics for the
+  // cost model.
+  int64 build_cost_model_after = 9;
+
+  // Annotate each Node with Op output shape data, to the extent it can
+  // be statically inferred.
+  bool infer_shapes = 5;
+
+  // Only place the subgraphs that are run, rather than the entire graph.
+  //
+  // This is useful for interactive graph building, where one might
+  // produce graphs that cannot be placed during the debugging
+  // process.  In particular, it allows the client to continue work in
+  // a session after adding a node to a graph whose placement
+  // constraints are unsatisfiable.
+  bool place_pruned_graph = 6;
+
+  // If true, transfer float values between processes as bfloat16.
+  bool enable_bfloat16_sendrecv = 7;
+
+  // If > 0, record a timeline every this many steps.
+  // EXPERIMENTAL: This currently has no effect in MasterSession.
+  int32 timeline_step = 8;
+
+  // Options that control the type and amount of graph rewriting.
+  // Not currently configurable via the public Python API (i.e. there is no API
+  // stability guarantee if you import RewriterConfig explicitly).
+  RewriterConfig rewrite_options = 10;
+}
+
+// Configuration for a session thread pool; see
+// ConfigProto.session_inter_op_thread_pool.
+message ThreadPoolOptionProto {
+  // The number of threads in the pool.
+  //
+  // 0 means the system picks a value based on where this option proto is used
+  // (see the declaration of the specific field for more info).
+  int32 num_threads = 1;
+
+  // The global name of the threadpool.
+  //
+  // If empty, then the threadpool is made and used according to the scope it's
+  // in - e.g., for a session threadpool, it is used by that session only.
+  //
+  // If non-empty, then:
+  // - a global threadpool associated with this name is looked
+  //   up or created. This allows, for example, sharing one threadpool across
+  //   many sessions (e.g., like the default behavior, if
+  //   inter_op_parallelism_threads is not configured), but still partitioning
+  //   into a large and small pool.
+  // - if the threadpool for this global_name already exists, then it is an
+  //   error if the existing pool was created using a different num_threads
+  //   value as is specified on this call.
+  // - threadpools created this way are never garbage collected.
+  string global_name = 2;
+}
+
+// Metadata about the session.
+//
+// This can be used by the runtime and the Ops for debugging, monitoring, etc.
+//
+// The (name, version) tuple is expected to be a unique identifier for
+// sessions within the same process.
+//
+// NOTE: This is currently used and propagated only by the direct session.
+message SessionMetadata {
+  // Identifying name for the session; combined with `version` below to form
+  // the session's unique identifier.
+  string name = 1;
+
+  // The version is optional. If set, needs to be >= 0.
+  int64 version = 2;
+}
+
+// Session configuration parameters.
+// The system picks appropriate values for fields that are not set.
+message ConfigProto {
+  // Map from device type name (e.g., "CPU" or "GPU" ) to maximum
+  // number of devices of that type to use.  If a particular device
+  // type is not found in the map, the system picks an appropriate
+  // number.
+  map<string, int32> device_count = 1;
+
+  // The execution of an individual op (for some op types) can be
+  // parallelized on a pool of intra_op_parallelism_threads.
+  // 0 means the system picks an appropriate number.
+  //
+  // If you create an ordinary session, e.g., from Python or C++,
+  // then there is exactly one intra op thread pool per process.
+  // The first session created determines the number of threads in this pool.
+  // All subsequent sessions reuse/share this one global pool.
+  //
+  // There are notable exceptions to the default behavior described above:
+  // 1. There is an environment variable  for overriding this thread pool,
+  //    named TF_OVERRIDE_GLOBAL_THREADPOOL.
+  // 2. When connecting to a server, such as a remote `tf.train.Server`
+  //    instance, then this option will be ignored altogether.
+  int32 intra_op_parallelism_threads = 2;
+
+  // Nodes that perform blocking operations are enqueued on a pool of
+  // inter_op_parallelism_threads available in each process.
+  //
+  // 0 means the system picks an appropriate number.
+  // Negative means all operations are performed in caller's thread.
+  //
+  // Note that the first Session created in the process sets the
+  // number of threads for all future sessions unless use_per_session_threads is
+  // true or session_inter_op_thread_pool is configured.
+  int32 inter_op_parallelism_threads = 5;
+
+  // If true, use a new set of threads for this session rather than the global
+  // pool of threads. Only supported by direct sessions.
+  //
+  // If false, use the global threads created by the first session, or the
+  // per-session thread pools configured by session_inter_op_thread_pool.
+  //
+  // This option is deprecated. The same effect can be achieved by setting
+  // session_inter_op_thread_pool to have one element, whose num_threads equals
+  // inter_op_parallelism_threads.
+  bool use_per_session_threads = 9;
+
+  // This option is experimental - it may be replaced with a different mechanism
+  // in the future.
+  //
+  // Configures session thread pools. If this is configured, then RunOptions for
+  // a Run call can select the thread pool to use.
+  //
+  // The intended use is for when some session invocations need to run in a
+  // background pool limited to a small number of threads:
+  // - For example, a session may be configured to have one large pool (for
+  // regular compute) and one small pool (for periodic, low priority work);
+  // using the small pool is currently the mechanism for limiting the inter-op
+  // parallelism of the low priority work.  Note that it does not limit the
+  // parallelism of work spawned by a single op kernel implementation.
+  // - Using this setting is normally not needed in training, but may help some
+  // serving use cases.
+  // - It is also generally recommended to set the global_name field of this
+  // proto, to avoid creating multiple large pools. It is typically better to
+  // run the non-low-priority work, even across sessions, in a single large
+  // pool.
+  repeated ThreadPoolOptionProto session_inter_op_thread_pool = 12;
+
+  // Assignment of Nodes to Devices is recomputed every placement_period
+  // steps until the system warms up (at which point the recomputation
+  // typically slows down automatically).
+  int32 placement_period = 3;
+
+  // When any filters are present, sessions will ignore all devices which do not
+  // match the filters. Each filter can be partially specified, e.g. "/job:ps"
+  // "/job:worker/replica:3", etc.
+  repeated string device_filters = 4;
+
+  // Options that apply to all GPUs.
+  GPUOptions gpu_options = 6;
+
+  // Options that apply to pluggable devices.
+  GPUOptions pluggable_device_options = 18;
+
+  // Whether soft placement is allowed. If allow_soft_placement is true,
+  // an op will be placed on CPU if
+  //   1. there's no GPU implementation for the OP
+  // or
+  //   2. no GPU devices are known or registered
+  // or
+  //   3. need to co-locate with reftype input(s) which are from CPU.
+  bool allow_soft_placement = 7;
+
+  // Whether device placements should be logged.
+  bool log_device_placement = 8;
+
+  // Options that apply to all graphs.
+  GraphOptions graph_options = 10;
+
+  // Global timeout for all blocking operations in this session.  If non-zero,
+  // and not overridden on a per-operation basis, this value will be used as the
+  // deadline for all blocking operations.
+  int64 operation_timeout_in_ms = 11;
+
+  // Options that apply when this session uses the distributed runtime.
+  RPCOptions rpc_options = 13;
+
+  // Optional list of all workers to use in this session.
+  ClusterDef cluster_def = 14;
+
+  // If true, any resources such as Variables used in the session will not be
+  // shared with other sessions. However, when clusterspec propagation is
+  // enabled, this field is ignored and sessions are always isolated.
+  bool isolate_session_state = 15;
+
+  // When true, WorkerSessions are created with device attributes from the
+  // full cluster.
+  // This is helpful when a worker wants to partition a graph
+  // (for example during a PartitionedCallOp).
+  bool share_cluster_devices_in_session = 17;
+  // https://www.tensorflow.org/guide/version_compat.
+  message Experimental {
+    // Task name for group resolution.
+    string collective_group_leader = 1;
+
+    // We removed the flag client_handles_error_formatting. Marking the tag
+    // number as reserved.
+    // TODO(shikharagarwal): Should we just remove this tag so that it can be
+    // used in future for other purpose?
+    reserved 2;
+
+    // Which executor to use, the default executor will be used
+    // if it is an empty string or "DEFAULT"
+    string executor_type = 3;
+
+    // Guidance to formatting of large RecvBuf fields for transfer.
+    // Any positive value sets the max chunk size.  0 defaults to 4096.
+    // Any negative value indicates no max, i.e. one chunk only.
+    int32 recv_buf_max_chunk = 4;
+
+    // If true, and supported by the platform, the runtime will attempt to
+    // use NUMA affinity where applicable.  One consequence will be the
+    // existence of as many CPU devices as there are available NUMA nodes.
+    bool use_numa_affinity = 5;
+
+    // If true, make collective op execution order sequential and deterministic
+    // for potentially concurrent collective instances.
+    bool collective_deterministic_sequential_execution = 6;
+
+    // If true, use NCCL for CollectiveOps.  This feature is highly
+    // experimental.
+    bool collective_nccl = 7;
+
+    // In the following, session state means the value of a variable, elements
+    // in a hash table, or any other resource, accessible by worker sessions
+    // held by a TF server.
+    //
+    // When ClusterSpec propagation is enabled, the value of
+    // isolate_session_state is ignored when deciding whether to share session
+    // states in a TF server (for backwards compatibility reasons).
+    // - If share_session_state_in_clusterspec_propagation is true, the session
+    // states are shared.
+    // - If share_session_state_in_clusterspec_propagation is false, session
+    // states are isolated.
+    //
+    // When clusterspec propagation is not used, the value of
+    // share_session_state_in_clusterspec_propagation is ignored when deciding
+    // whether to share session states in a TF server.
+    // - If isolate_session_state is true, session states are isolated.
+    // - If isolate_session_state is false, session states are shared.
+    //
+    // TODO(b/129330037): Add a single API that consistently treats
+    // isolate_session_state and ClusterSpec propagation.
+    bool share_session_state_in_clusterspec_propagation = 8;
+
+    // If using a direct session, disable spinning while waiting for work in
+    // the thread pool. This may result in higher latency for completing ops,
+    // but in the case where there is a lot of spinning may result in lower
+    // CPU usage.
+    bool disable_thread_spinning = 9;
+
+    // This was promoted to a non-experimental API. Please use
+    // ConfigProto.share_cluster_devices_in_session instead.
+    bool share_cluster_devices_in_session = 10;
+
+    // Metadata about the session.
+    //
+    // If set, this can be used by the runtime and the Ops for debugging,
+    // monitoring, etc.
+    //
+    // NOTE: This is currently used and propagated only by the direct session
+    // and EagerContext.
+    SessionMetadata session_metadata = 11;
+
+    // If true, the session may treat the graph as being static for optimization
+    // purposes.
+    //
+    // If this option is set to true when a session is created, the full
+    // GraphDef must be passed in a single call to Session::Create(), and
+    // Session::Extend() may not be supported.
+    bool optimize_for_static_graph = 12;
+
+    // Whether to enable the MLIR-based TF->XLA bridge. This is only used if set
+    // to true. Default value or false is ignored. Use mlir_bridge_rollout for
+    // finer control.
+    //
+    // If this option is set to true when a session is created, MLIR is used to
+    // perform the set of graph transformations to put the graph in a form that
+    // can be executed with delegation of some computations to an accelerator.
+    // This builds on the model of XLA where a subset of the graph is
+    // encapsulated and attached to a "compile" operation, whose result is fed
+    // to an "execute" operation. The kernel for these operations is responsible
+    // to lower the encapsulated graph to a particular device.
+    bool enable_mlir_bridge = 13;
+
+    // An enum that describes the state of the MLIR bridge rollout.
+    enum MlirBridgeRollout {
+      // If this field is left unspecified, the MLIR bridge may be selectively
+      // enabled on a per graph basis.
+      MLIR_BRIDGE_ROLLOUT_UNSPECIFIED = 0;
+      // Enabling the MLIR bridge enables it for all graphs in this session.
+      MLIR_BRIDGE_ROLLOUT_ENABLED = 1;
+      // Disabling the MLIR bridge disables it for all graphs in this session.
+      MLIR_BRIDGE_ROLLOUT_DISABLED = 2;
+      reserved 3, 4;
+      reserved "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED",
+          "MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED";
+    }
+    // Whether to enable the MLIR-based TF->XLA bridge.
+    MlirBridgeRollout mlir_bridge_rollout = 17;
+
+    // Whether to enable the MLIR-based Graph optimizations.
+    //
+    // This will become a part of standard Tensorflow graph optimization
+    // pipeline, currently this is only used for gradual migration and testing
+    // new passes that are replacing existing optimizations in Grappler.
+    bool enable_mlir_graph_optimization = 16;
+
+    // If true, the session will not store an additional copy of the graph for
+    // each subgraph.
+    //
+    // If this option is set to true when a session is created, the
+    // `RunOptions.output_partition_graphs` options must not be set.
+    bool disable_output_partition_graphs = 14;
+
+    // Minimum number of batches run through the XLA graph before XLA fusion
+    // autotuner is enabled. Default value of zero disables the autotuner.
+    //
+    // The XLA fusion autotuner can improve performance by executing a heuristic
+    // search on the compiler parameters.
+    int64 xla_fusion_autotuner_thresh = 15;
+
+    // Whether runtime execution uses TFRT.
+    bool use_tfrt = 18;
+
+    // If true, use Pathways with TFRT API for multi host support.
+    bool enable_multi_host = 27;
+
+    // If true, use ifrt as the backend for TFRT. This is only used when
+    // `use_tfrt` is true.
+    bool tfrt_use_ifrt = 32;
+
+    // Port for the Pathways server. Ignored if enable_multi_host=false.
+    int32 backend_server_port = 28;
+
+    // If true, TFRT will use TPU specific compiler passes and perform TPU
+    // specific initialization.
+    bool target_tpu = 29;
+
+    // If true, TFRT will use GPU specific compiler passes and perform GPU
+    // specific initialization.
+    bool target_gpu = 30;
+
+    // The threshold to merge small streams in TFRT. The stream with cost
+    // smaller than the threshold will be merged. Setting it to value 1
+    // disables all merges.
+    int32 stream_merge_threshold = 31;
+
+    // The field "coordination_service" was previously specified as a string;
+    // this has been replaced with a message below.
+    reserved 19;
+
+    // We removed the flag fetch_remote_devices_in_multi_client. Marking the tag
+    // number as reserved.
+    reserved 20;
+
+    // Whether functional control flow op lowering should be disabled. This is
+    // useful when executing within a portable runtime where control flow op
+    // kernels may not be loaded due to selective registration.
+    bool disable_functional_ops_lowering = 21;
+
+    // Provides a hint to XLA auto clustering to prefer forming a single large
+    // cluster that encompasses most of the graph.
+    bool xla_prefer_single_graph_cluster = 22;
+
+    // Distributed coordination service configurations.
+    CoordinationServiceConfig coordination_config = 23;
+
+    // If true, the session will treat the graph as being non-static for
+    // optimization purposes.
+    //
+    // If this option is set to true when a session is created, the full
+    // GraphDef will be retained to enable calls to Session::Extend().
+    // Calling Extend() without setting this flag will result in errors.
+    //
+    // This option is meant to replace `optimize_for_static_graph` and it
+    // aims to negate its value.
+    bool disable_optimize_for_static_graph = 24;
+
+    // Whether eager remote execution will stream all the function calls or
+    // allow them to happen in parallel. When true, streaming execution is
+    // disabled, and parallel execution is allowed.
+    bool disable_eager_executor_streaming_enqueue = 26;
+
+    reserved 25;
+
+    // Next: 33
+  }
+
+  Experimental experimental = 16;
+
+  // Next: 19
+}
+
+// Options for a single Run() call.
+message RunOptions {
+  // TODO(pbar) Turn this into a TraceOptions proto which allows
+  // tracing to be controlled in a more orthogonal manner?
+  enum TraceLevel {
+    NO_TRACE = 0;
+    SOFTWARE_TRACE = 1;
+    HARDWARE_TRACE = 2;
+    FULL_TRACE = 3;
+  }
+  TraceLevel trace_level = 1;
+
+  // Time to wait for operation to complete in milliseconds.
+  int64 timeout_in_ms = 2;
+
+  // The thread pool to use, if session_inter_op_thread_pool is configured.
+  // To use the caller thread set this to -1 - this uses the caller thread
+  // to execute Session::Run() and thus avoids a context switch. Using the
+  // caller thread to execute Session::Run() should be done ONLY for simple
+  // graphs, where the overhead of an additional context switch is
+  // comparable with the overhead of Session::Run().
+  int32 inter_op_thread_pool = 3;
+
+  // Whether the partition graph(s) executed by the executor(s) should be
+  // output via RunMetadata.
+  bool output_partition_graphs = 5;
+
+  // EXPERIMENTAL.  Options used to initialize DebuggerState, if enabled.
+  DebugOptions debug_options = 6;
+
+  // When enabled, causes tensor allocation information to be included in
+  // the error message when the Run() call fails because the allocator ran
+  // out of memory (OOM).
+  //
+  // Enabling this option can slow down the Run() call.
+  bool report_tensor_allocations_upon_oom = 7;
+
+  // Everything inside Experimental is subject to change and is not subject
+  // to API stability guarantees in
+  // https://www.tensorflow.org/guide/version_compat.
+  message Experimental {
+    // If non-zero, declares that this graph is going to use collective
+    // ops and must synchronize step_ids with any other graph with this
+    // same group_key value (in a distributed computation where tasks
+    // run disjoint graphs).
+    int64 collective_graph_key = 1;
+    // If true, then operations (using the inter-op pool) across all
+    // session::run() calls will be centrally scheduled, optimizing for (median
+    // and tail) latency.
+    // Consider using this option for CPU-bound workloads like inference.
+    bool use_run_handler_pool = 2;
+    // Options for run handler thread pool.
+    message RunHandlerPoolOptions {
+      // Priority of the request. The run handler thread pool will schedule ops
+      // based on the priority number. The larger number means higher priority.
+      int64 priority = 1;
+    }
+    RunHandlerPoolOptions run_handler_pool_options = 3;
+  }
+
+  Experimental experimental = 8;
+
+  reserved 4;
+}
+
+// Metadata output (i.e., non-Tensor) for a single Run() call.
+message RunMetadata {
+  // Statistics traced for this step. Populated if tracing is turned on via the
+  // "RunOptions" proto.
+  // EXPERIMENTAL: The format and set of events may change in future versions.
+  StepStats step_stats = 1;
+
+  // The cost graph for the computation defined by the run call.
+  CostGraphDef cost_graph = 2;
+
+  // Graphs of the partitions executed by executors.
+  repeated GraphDef partition_graphs = 3;
+
+  message FunctionGraphs {
+    // TODO(nareshmodi): Include some sort of function/cache-key identifier?
+    repeated GraphDef partition_graphs = 1;
+
+    GraphDef pre_optimization_graph = 2;
+    GraphDef post_optimization_graph = 3;
+  }
+  // This is only populated for graphs that are run as functions in TensorFlow
+  // V2. There will be an entry below for each function that is traced.
+  // The main use cases of the post_optimization_graph and the partition_graphs
+  // are to give the caller insight into the graphs that were actually run by
+  // the runtime. Additional information (such as those in step_stats) will
+  // match these graphs.
+  // We also include the pre_optimization_graph since it is usually easier to
+  // read, and is helpful in situations where the caller wants to get a high
+  // level idea of what the built graph looks like (since the various graph
+  // optimization passes might change the structure of the graph significantly).
+  repeated FunctionGraphs function_graphs = 4;
+
+  // Metadata about the session.
+  SessionMetadata session_metadata = 5;
+}
+
+// Defines a connection between two tensors in a `GraphDef`.
+message TensorConnection {
+  // A tensor name. The value of this tensor will be substituted for
+  // the tensor named in `to_tensor`.
+  string from_tensor = 1;
+
+  // A tensor name. The value of this tensor will be bound to the
+  // value of the tensor named in `from_tensor`.
+  string to_tensor = 2;
+}
+
+// Defines a subgraph in another `GraphDef` as a set of feed points and nodes
+// to be fetched or executed.
+//
+// Compare with the arguments to `Session::Run()`.
+message CallableOptions {
+  // Tensors to be fed in the callable. Each feed is the name of a tensor.
+  repeated string feed = 1;
+
+  // Fetches. A list of tensor names. The caller of the callable expects a
+  // tensor to be returned for each fetch[i] (see RunStepResponse.tensor). The
+  // order of specified fetches does not change the execution order.
+  repeated string fetch = 2;
+
+  // Target Nodes. A list of node names. The named nodes will be run by the
+  // callable but their outputs will not be returned.
+  repeated string target = 3;
+
+  // Options that will be applied to each run.
+  RunOptions run_options = 4;
+
+  // Tensors to be connected in the callable. Each TensorConnection denotes
+  // a pair of tensors in the graph, between which an edge will be created
+  // in the callable.
+  repeated TensorConnection tensor_connection = 5;
+
+  // The Tensor objects fed in the callable and fetched from the callable
+  // are expected to be backed by host (CPU) memory by default.
+  //
+  // The options below allow changing that - feeding tensors backed by
+  // device memory, or returning tensors that are backed by device memory.
+  //
+  // The maps below map the name of a feed/fetch tensor (which appears in
+  // 'feed' or 'fetch' fields above), to the fully qualified name of the device
+  // owning the memory backing the contents of the tensor.
+  //
+  // For example, creating a callable with the following options:
+  //
+  // CallableOptions {
+  //   feed: "a:0"
+  //   feed: "b:0"
+  //
+  //   fetch: "x:0"
+  //   fetch: "y:0"
+  //
+  //   feed_devices: {
+  //     "a:0": "/job:localhost/replica:0/task:0/device:GPU:0"
+  //   }
+  //
+  //   fetch_devices: {
+  //     "y:0": "/job:localhost/replica:0/task:0/device:GPU:0"
+  //   }
+  // }
+  //
+  // means that the Callable expects:
+  // - The first argument ("a:0") is a Tensor backed by GPU memory.
+  // - The second argument ("b:0") is a Tensor backed by host memory.
+  // and of its return values:
+  // - The first output ("x:0") will be backed by host memory.
+  // - The second output ("y:0") will be backed by GPU memory.
+  //
+  // FEEDS:
+  // It is the responsibility of the caller to ensure that the memory of the fed
+  // tensors will be correctly initialized and synchronized before it is
+  // accessed by operations executed during the call to Session::RunCallable().
+  //
+  // This is typically ensured by using the TensorFlow memory allocators
+  // (Device::GetAllocator()) to create the Tensor to be fed.
+  //
+  // Alternatively, for CUDA-enabled GPU devices, this typically means that the
+  // operation that produced the contents of the tensor has completed, i.e., the
+  // CUDA stream has been synchronized (e.g., via cuCtxSynchronize() or
+  // cuStreamSynchronize()).
+  map<string, string> feed_devices = 6;
+  map<string, string> fetch_devices = 7;
+
+  // By default, RunCallable() will synchronize the GPU stream before returning
+  // fetched tensors on a GPU device, to ensure that the values in those tensors
+  // have been produced. This simplifies interacting with the tensors, but
+  // potentially incurs a performance hit.
+  //
+  // If this option is set to true, the caller is responsible for ensuring
+  // that the values in the fetched tensors have been produced before they are
+  // used. The caller can do this by invoking `Device::Sync()` on the underlying
+  // device(s), or by feeding the tensors back to the same Session using
+  // `feed_devices` with the same corresponding device name.
+  bool fetch_skip_sync = 8;
+
+  // Next: 9
+}
+
+message BatchingOptions {
+  // Number of scheduling threads for processing batches of work. Determines
+  // the number of batches processed in parallel. This should be roughly in line
+  // with the number of TPU cores available.
+  int32 num_batch_threads = 1;
+
+  // The maximum allowed batch size. Can be larger than allowed_batch_sizes to
+  // utilize large batch splitting.
+  int32 max_batch_size = 2;
+
+  // Maximum number of microseconds to wait before outputting an incomplete
+  // batch.
+  int32 batch_timeout_micros = 3;
+
+  // Optional list of allowed batch sizes. If left empty, does nothing.
+  // Otherwise, supplies a list of batch sizes, causing the op to pad batches up
+  // to one of those sizes. The entries must increase monotonically, and the
+  // final entry must be equal to or less than the max_batch_size.
+  repeated int32 allowed_batch_sizes = 4;
+
+  // Maximum number of batches enqueued for processing before requests are
+  // failed fast.
+  int32 max_enqueued_batches = 5;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/debug.proto b/src/main/proto/tensorflow/core/protobuf/debug.proto
new file mode 100644
index 0000000..2fabd03
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/debug.proto
@@ -0,0 +1,94 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "DebugProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// Option for watching a node in TensorFlow Debugger (tfdbg).
+message DebugTensorWatch {
+  // Name of the node to watch.
+  // Use "*" for wildcard. But note: currently, regex is not supported in
+  // general.
+  string node_name = 1;
+
+  // Output slot to watch.
+  // The semantics of output_slot == -1 is that all outputs of the node
+  // will be watched (i.e., a wildcard).
+  // Other negative values of output_slot are invalid and will lead to
+  // errors currently.
+  int32 output_slot = 2;
+
+  // Name(s) of the debugging op(s).
+  // One or more than one probes on a tensor.
+  // e.g., {"DebugIdentity", "DebugNanCount"}
+  repeated string debug_ops = 3;
+
+  // URL(s) for debug target(s).
+  //
+  // Supported URL formats are:
+  //   - file:///foo/tfdbg_dump: Writes out Event content to file
+  //     /foo/tfdbg_dump.  Assumes all directories can be created if they don't
+  //     already exist.
+  //   - grpc://localhost:11011: Sends an RPC request to an EventListener
+  //     service running at localhost:11011 with the event.
+  //   - memcbk:///event_key: Routes tensors to clients using the
+  //     callback registered with the DebugCallbackRegistry for event_key.
+  //
+  // Each debug op listed in debug_ops will publish its output tensor (debug
+  // signal) to all URLs in debug_urls.
+  //
+  // N.B. Session::Run() supports concurrent invocations of the same inputs
+  // (feed keys), outputs and target nodes. If such concurrent invocations
+  // are to be debugged, the callers of Session::Run() must use distinct
+  // debug_urls to make sure that the streamed or dumped events do not overlap
+  // among the invocations.
+  // TODO(cais): More visible documentation of this in g3docs.
+  repeated string debug_urls = 4;
+
+  // Do not error out if debug op creation fails (e.g., due to dtype
+  // incompatibility). Instead, just log the failure.
+  bool tolerate_debug_op_creation_failures = 5;
+}
+
+// Options for initializing DebuggerState in TensorFlow Debugger (tfdbg).
+message DebugOptions {
+  // Per-tensor watch options; see DebugTensorWatch for details.
+  repeated DebugTensorWatch debug_tensor_watch_opts = 4;
+
+  // Caller-specified global step count.
+  // Note that this is distinct from the session run count and the executor
+  // step count.
+  int64 global_step = 10;
+
+  // Whether the total disk usage of tfdbg is to be reset to zero
+  // in this Session.run call. This is used by wrappers and hooks
+  // such as the local CLI ones to indicate that the dumped tensors
+  // are cleaned up from the disk after each Session.run.
+  bool reset_disk_byte_usage = 11;
+}
+
+message DebuggedSourceFile {
+  // The host name on which a source code file is located.
+  string host = 1;
+
+  // Path to the source code file.
+  string file_path = 2;
+
+  // The timestamp at which the source code file was last modified.
+  int64 last_modified = 3;
+
+  // Byte size of the file.
+  int64 bytes = 4;
+
+  // Line-by-line content of the source code file.
+  repeated string lines = 5;
+}
+
+message DebuggedSourceFiles {
+  // A collection of source code files.
+  repeated DebuggedSourceFile source_files = 1;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/error_codes.proto b/src/main/proto/tensorflow/core/protobuf/error_codes.proto
new file mode 100644
index 0000000..6842c69
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/error_codes.proto
@@ -0,0 +1,11 @@
+syntax = "proto3";
+
+// Add a dummy package name. Having no package, like
+// core/lib/core/error_codes.proto, or having tensorflow.error, like
+// tsl/protobuf/error_codes.proto, results in name collision errors in generated
+// code for some users that use JS through J2CL.
+package tensorflow.error.dummy;
+
+import public "xla/tsl/protobuf/error_codes.proto";
+
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
diff --git a/src/main/proto/tensorflow/core/protobuf/meta_graph.proto b/src/main/proto/tensorflow/core/protobuf/meta_graph.proto
new file mode 100644
index 0000000..b525c52
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/meta_graph.proto
@@ -0,0 +1,286 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "google/protobuf/any.proto";
+import "tensorflow/core/framework/graph.proto";
+import "tensorflow/core/framework/op_def.proto";
+import "tensorflow/core/framework/tensor.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+import "tensorflow/core/protobuf/saved_object_graph.proto";
+import "tensorflow/core/protobuf/saver.proto";
+import "tensorflow/core/protobuf/struct.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "MetaGraphProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// Protocol buffer containing the following which are necessary to restart
+// training or run inference. It can be used to serialize/de-serialize memory
+// objects necessary for running computation in a graph when crossing the
+// process boundary. It can be used for long term storage of graphs,
+// cross-language execution of graphs, etc.
+//   MetaInfoDef
+//   GraphDef
+//   SaverDef
+//   CollectionDef
+//   TensorInfo
+//   SignatureDef
+message MetaGraphDef {
+  // Meta information regarding the graph to be exported.  To be used by users
+  // of this protocol buffer to encode information regarding their meta graph.
+  message MetaInfoDef {
+    // User specified Version string. Can be the name of the model and revision,
+    // steps this model has been trained to, etc.
+    string meta_graph_version = 1;
+
+    // A copy of the OpDefs used by the producer of this graph_def.
+    // Descriptions and Ops not used in graph_def are stripped out.
+    OpList stripped_op_list = 2;
+
+    // A serialized protobuf. Can be the time this meta graph is created, or
+    // modified, or the name of the model.
+    google.protobuf.Any any_info = 3;
+
+    // User supplied tag(s) on the meta_graph and included graph_def.
+    //
+    // MetaGraphDefs should be tagged with their capabilities or use-cases.
+    // Examples: "train", "serve", "gpu", "tpu", etc.
+    // These tags enable loaders to access the MetaGraph(s) appropriate for a
+    // specific use-case or runtime environment.
+    repeated string tags = 4;
+
+    // The __version__ string of the tensorflow build used to write this graph.
+    // This will be populated by the framework, which will overwrite any user
+    // supplied value.
+    string tensorflow_version = 5;
+
+    // The __git_version__ string of the tensorflow build used to write this
+    // graph. This will be populated by the framework, which will overwrite any
+    // user supplied value.
+    string tensorflow_git_version = 6;
+
+    // A flag to denote whether default-valued attrs have been stripped from
+    // the nodes in this graph_def.
+    bool stripped_default_attrs = 7;
+
+    // FunctionDef name to aliases mapping.
+    map<string, string> function_aliases = 8;
+  }
+  MetaInfoDef meta_info_def = 1;
+
+  // GraphDef.
+  GraphDef graph_def = 2;
+
+  // SaverDef.
+  SaverDef saver_def = 3;
+
+  // collection_def: Map from collection name to collections.
+  // See CollectionDef section for details.
+  map<string, CollectionDef> collection_def = 4;
+
+  // signature_def: Map from user supplied key for a signature to a single
+  // SignatureDef.
+  map<string, SignatureDef> signature_def = 5;
+
+  // Asset file def to be used with the defined graph.
+  repeated AssetFileDef asset_file_def = 6;
+
+  // Extra information about the structure of functions and stateful objects.
+  SavedObjectGraph object_graph_def = 7;
+}
+
+// CollectionDef should cover most collections.
+// To add a user-defined collection, do one of the following:
+// 1. For simple data types, such as string, int, float:
+//      tf.add_to_collection("your_collection_name", your_simple_value)
+//    Strings will be stored as bytes_list.
+//
+// 2. For Protobuf types, there are three ways to add them:
+//    1) tf.add_to_collection("your_collection_name",
+//         your_proto.SerializeToString())
+//
+//       collection_def {
+//         key: "user_defined_bytes_collection"
+//         value {
+//           bytes_list {
+//             value: "queue_name: \"test_queue\"\n"
+//           }
+//         }
+//       }
+//
+//  or
+//
+//    2) tf.add_to_collection("your_collection_name", str(your_proto))
+//
+//       collection_def {
+//         key: "user_defined_string_collection"
+//         value {
+//          bytes_list {
+//             value: "\n\ntest_queue"
+//           }
+//         }
+//       }
+//
+//  or
+//
+//    3) any_buf = any_pb2.Any()
+//       tf.add_to_collection("your_collection_name",
+//         any_buf.Pack(your_proto))
+//
+//       collection_def {
+//         key: "user_defined_any_collection"
+//         value {
+//           any_list {
+//             value {
+//               type_url: "type.googleapis.com/tensorflow.QueueRunnerDef"
+//               value: "\n\ntest_queue"
+//             }
+//           }
+//         }
+//       }
+//
+// 3. For Python objects, implement to_proto() and from_proto(), and register
+//    them in the following manner:
+//    ops.register_proto_function("your_collection_name",
+//                                proto_type,
+//                                to_proto=YourPythonObject.to_proto,
+//                                from_proto=YourPythonObject.from_proto)
+//    These functions will be invoked to serialize and de-serialize the
+//    collection. For example,
+//    ops.register_proto_function(ops.GraphKeys.GLOBAL_VARIABLES,
+//                                proto_type=variable_pb2.VariableDef,
+//                                to_proto=Variable.to_proto,
+//                                from_proto=Variable.from_proto)
+message CollectionDef {
+  // NodeList is used for collecting nodes in a graph. For example:
+  // collection_def {
+  //   key: "summaries"
+  //   value {
+  //     node_list {
+  //       value: "input_producer/ScalarSummary:0"
+  //       value: "shuffle_batch/ScalarSummary:0"
+  //       value: "ImageSummary:0"
+  //     }
+  //   }
+  message NodeList {
+    repeated string value = 1;
+  }
+
+  // BytesList is used for collecting strings and serialized protobufs. For
+  // example:
+  // collection_def {
+  //   key: "trainable_variables"
+  //   value {
+  //     bytes_list {
+  //       value: "\n\017conv1/weights:0\022\024conv1/weights/Assign
+  //              \032\024conv1/weights/read:0"
+  //       value: "\n\016conv1/biases:0\022\023conv1/biases/Assign\032
+  //              \023conv1/biases/read:0"
+  //     }
+  //   }
+  // }
+  message BytesList {
+    repeated bytes value = 1;
+  }
+
+  // Int64List is used for collecting int, int64 and long values.
+  message Int64List {
+    repeated int64 value = 1 [packed = true];
+  }
+
+  // FloatList is used for collecting float values.
+  message FloatList {
+    repeated float value = 1 [packed = true];
+  }
+
+  // AnyList is used for collecting Any protos.
+  message AnyList {
+    repeated google.protobuf.Any value = 1;
+  }
+
+  oneof kind {
+    NodeList node_list = 1;
+    BytesList bytes_list = 2;
+    Int64List int64_list = 3;
+    FloatList float_list = 4;
+    AnyList any_list = 5;
+  }
+}
+
+// Information about a Tensor necessary for feeding or retrieval.
+message TensorInfo {
+  // For sparse tensors, the COO encoding stores a triple of values, indices,
+  // and shape.
+  message CooSparse {
+    // The shape of the values Tensor is [?].  Its dtype must be the dtype of
+    // the SparseTensor as a whole, given in the enclosing TensorInfo.
+    string values_tensor_name = 1;
+
+    // The indices Tensor must have dtype int64 and shape [?, ?].
+    string indices_tensor_name = 2;
+
+    // The dynamic logical shape represented by the SparseTensor is recorded in
+    // the Tensor referenced here.  It must have dtype int64 and shape [?].
+    string dense_shape_tensor_name = 3;
+  }
+
+  // Generic encoding for composite tensors.
+  message CompositeTensor {
+    // The serialized TypeSpec for the composite tensor.
+    TypeSpecProto type_spec = 1;
+
+    // A TensorInfo for each flattened component tensor.
+    repeated TensorInfo components = 2;
+  }
+
+  oneof encoding {
+    // For dense `Tensor`s, the name of the tensor in the graph.
+    string name = 1;
+    // There are many possible encodings of sparse matrices
+    // (https://en.wikipedia.org/wiki/Sparse_matrix).  Currently, TensorFlow
+    // uses only the COO encoding.  This is supported and documented in the
+    // SparseTensor Python class.
+    CooSparse coo_sparse = 4;
+    // Generic encoding for CompositeTensors.
+    CompositeTensor composite_tensor = 5;
+  }
+  DataType dtype = 2;
+  // The static shape should be recorded here, to the extent that it can
+  // be known in advance.  In the case of a SparseTensor, this field describes
+  // the logical shape of the represented tensor (aka dense_shape).
+  TensorShapeProto tensor_shape = 3;
+}
+
+// SignatureDef defines the signature of a computation supported by a TensorFlow
+// graph.
+message SignatureDef {
+  // Named input parameters.
+  map<string, TensorInfo> inputs = 1;
+  // Named output parameters.
+  map<string, TensorInfo> outputs = 2;
+  // Deprecated: TensorFlow 2 always sets this to a fixed value;
+  // open-source TF Serving stopped checking by default since release 2.4.
+  //
+  // In TensorFlow 1, the method_name enabled users to mark a SignatureDef as
+  // supporting a particular method. Multiple SignatureDefs in a single
+  // MetaGraphDef could have the same method_name (e.g., to support multi-headed
+  // computation).
+  string method_name = 3;
+  // Named input to corresponding default values if any.
+  map<string, TensorProto> defaults = 4;
+}
+
+// An asset file def for a single file or a set of sharded files with the same
+// name.
+message AssetFileDef {
+  // The tensor to bind the asset filename to.
+  TensorInfo tensor_info = 1;
+  // The filename within an assets directory. Note: does not include the path
+  // prefix, i.e. directories. For an asset at /tmp/path/vocab.txt, the filename
+  // would be "vocab.txt".
+  string filename = 2;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/named_tensor.proto b/src/main/proto/tensorflow/core/protobuf/named_tensor.proto
new file mode 100644
index 0000000..8d401a0
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/named_tensor.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/tensor.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "NamedTensorProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// A pair of tensor name and tensor values.
+message NamedTensorProto {
+  // Name of the tensor.
+  string name = 1;
+
+  // The client can populate a TensorProto using a `tensorflow::Tensor`, or
+  // directly using the protobuf field accessors.
+  //
+  // The client specifies whether the returned tensor values should be
+  // filled tensor fields (float_val, int_val, etc.) or encoded in a
+  // compact form in tensor.tensor_content.
+  TensorProto tensor = 2;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/rewriter_config.proto b/src/main/proto/tensorflow/core/protobuf/rewriter_config.proto
new file mode 100644
index 0000000..f98d192
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/rewriter_config.proto
@@ -0,0 +1,241 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/attr_value.proto";
+import "tensorflow/core/protobuf/verifier_config.proto";
+
+option cc_enable_arenas = true;
+option java_outer_classname = "RewriterConfigProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+message AutoParallelOptions {
+  bool enable = 1;
+  int32 num_replicas = 2;
+}
+
+message ScopedAllocatorOptions {
+  // If present, only perform optimization for these ops.
+  repeated string enable_op = 1;
+}
+
+message RewriterConfig {
+  // Graph rewriting is experimental and subject to change, not covered by any
+  // API stability guarantees.
+
+  // Configuration options for the meta-optimizer. Unless otherwise noted, these
+  // configuration options do not apply to explicitly triggered optimization
+  // passes in the optimizers field.
+
+  enum Toggle {
+    DEFAULT = 0;
+    ON = 1;
+    OFF = 2;
+    // Enable some aggressive optimizations that use assumptions that TF graphs
+    // may break. For example, assume the shape of a placeholder matches its
+    // actual feed.
+    AGGRESSIVE = 3;
+    // Run MLIR pass if there's one implemented in TFG, do nothing otherwise.
+    // I.e., if there's no corresponding TFG pass, it's an OFF. This is supposed
+    // to be mapped with `ON` and there's no `AGGRESSIVE` in MLIR pass now.
+    EXPERIMENTAL_MLIR = 4;
+    // Run both MLIR and Grappler passes consecutively and MLIR pass will come
+    // first.
+    EXPERIMENTAL_BOTH = 5;
+  }
+
+  // Enum for layout conversion between NCHW and NHWC on CPU. Default is OFF.
+  enum CpuLayout {
+    NO_CONVERSION_ON_CPU = 0;
+    NCHW_TO_NHWC = 1;
+    NHWC_TO_NCHW = 2;
+  }
+
+  // Enum controlling the number of times to run optimizers. The default is to
+  // run them twice.
+  enum NumIterationsType {
+    DEFAULT_NUM_ITERS = 0;
+    ONE = 1;
+    TWO = 2;
+  }
+
+  // CPU layout conversion settings between NHWC and NCHW.
+  CpuLayout cpu_layout_conversion = 50;
+
+  // Optimize tensor layouts (default is ON)
+  // e.g. This will try to use NCHW layout on GPU which is faster.
+  Toggle layout_optimizer = 1;
+  // Fold constants (default is ON)
+  // Statically infer the value of tensors when possible, and materialize the
+  // result using constants.
+  Toggle constant_folding = 3;
+  // Shape optimizations (default is ON)
+  // Simplify computations made on shapes.
+  Toggle shape_optimization = 13;
+  // Remapping (default is ON)
+  // Remap subgraphs onto more efficient implementations.
+  Toggle remapping = 14;
+  // Common subgraph elimination (default is ON)
+  // e.g. Eliminate duplicated subgraphs that compute the same value.
+  Toggle common_subgraph_elimination = 24;
+  // Arithmetic optimizations (default is ON)
+  // e.g. Simplify arithmetic ops; merge ops with same value (like constants).
+  Toggle arithmetic_optimization = 7;
+  // Control dependency optimizations (default is ON).
+  // Remove redundant control dependencies, which may enable other optimization.
+  Toggle dependency_optimization = 8;
+  // Loop optimizations (default is ON).
+  Toggle loop_optimization = 9;
+  // Function optimizations (default is ON).
+  Toggle function_optimization = 10;
+  // Strips debug-related nodes from the graph (off by default).
+  Toggle debug_stripper = 11;
+  // If true, don't remove unnecessary ops from the graph
+  bool disable_model_pruning = 2;
+  // Try to allocate some independent Op outputs contiguously in order to
+  // merge or eliminate downstream Ops (off by default).
+  Toggle scoped_allocator_optimization = 15;
+  // Force small ops onto the CPU (default is OFF).
+  Toggle pin_to_host_optimization = 18;
+  // Enable the swap of kernel implementations based on the device placement
+  // (default is ON).
+  Toggle implementation_selector = 22;
+  // Optimize data types for CUDA/oneDNN (default is OFF).
+  // This will try to use float16 on GPU/CPU which is faster.
+  // Note that this can change the numerical stability of the graph and may
+  // require the use of loss scaling to maintain model convergence.
+  Toggle auto_mixed_precision = 23;
+  // Optimize data types for oneDNN (default is OFF).
+  // This will try to use bfloat16 on CPUs, which is faster.
+  // Note that this can change the numerical stability of the graph.
+  // Note: this is deprecated.
+  // It is replaced by auto_mixed_precision_onednn_bfloat16
+  Toggle auto_mixed_precision_mkl = 25;
+  // Optimize data types for oneDNN (default is OFF).
+  // This will try to use bfloat16 on CPUs, which is faster.
+  // Note that this can change the numerical stability of the graph.
+  // Note: this is equivalent to the deprecated option auto_mixed_precision_mkl
+  Toggle auto_mixed_precision_onednn_bfloat16 = 31;
+  // Emulate a model using data type float16 on CPU (default is OFF).
+  // This will try to emulate the float16 inputs and outputs of an operator
+  // on CPU to have better correlation with float16 on GPU; however the
+  // computation in the operator is based on float32.
+  // Note that this can change the numerical stability of the graph.
+  Toggle auto_mixed_precision_cpu = 29;
+  // Disable the entire meta optimizer (off by default).
+  bool disable_meta_optimizer = 19;
+  // Disable the TFG optimizer (off by default).
+  bool disable_tfg_optimizer = 32;
+  // Optimizers registered by plugin (default is ON)
+  Toggle use_plugin_optimizers = 28;
+  // Conditional code motion (default is ON).
+  Toggle experimental_conditional_code_motion = 30;
+
+  // Controls how many times we run the optimizers in meta optimizer (default
+  // is once).
+  NumIterationsType meta_optimizer_iterations = 12;
+
+  // The minimum number of nodes in a graph to optimize. For smaller graphs,
+  // optimization is skipped.
+  // 0 means the system picks an appropriate number.
+  // < 0 means do not skip optimization.
+  int32 min_graph_nodes = 17;
+
+  // Disable optimizations that assume compressed tensors. Note that this flag
+  // is experimental and may be removed in the future.
+  bool experimental_disable_compressed_tensor_optimization = 26;
+
+  // Disable folding quantization emulation ops such as FakeQuantWithMinMax* and
+  // QuantizeAndDequantize*. Some compilers (e.g. the TF-to-tflite converter)
+  // have to extract quantization configs (e.g. min/max range, number of bits,
+  // and per-channel) from the quantization emulation ops. Note that this flag
+  // is experimental and may be removed in the future. See b/174138564 for more
+  // details.
+  bool experimental_disable_folding_quantization_emulation = 27;
+
+  enum MemOptType {
+    // The default setting (SCHEDULING and SWAPPING HEURISTICS only)
+    DEFAULT_MEM_OPT = 0;
+    // Disabled in the meta-optimizer.
+    NO_MEM_OPT = 1;
+    // Driven by manual op-level annotations.
+    MANUAL = 2;
+
+    // Driven by heuristics. The behavior of these heuristics is subject to
+    // change. Currently includes an experimental recomputation and swapping
+    // heuristics. Manual annotations are respected, but additional nodes are
+    // selected automatically.
+
+    // Swapping heuristic will move a tensor from the GPU to the CPU and move
+    // it back when needed to reduce peak memory usage.
+    SWAPPING_HEURISTICS = 4;
+    // Recomputation heuristics will recompute ops (such as Relu activation)
+    // during backprop instead of storing them, reducing peak memory usage.
+    RECOMPUTATION_HEURISTICS = 5;
+    // Scheduling will split big ops such as AddN and try to enforce a schedule
+    // of the new computations that decreases peak memory usage.
+    SCHEDULING_HEURISTICS = 6;
+    // Use any combination of swapping and recomputation heuristics.
+    HEURISTICS = 3;
+  }
+  // Configures memory optimization passes through the meta-optimizer. Has no
+  // effect on manually requested memory optimization passes in the optimizers
+  // field.
+  MemOptType memory_optimization = 4;
+  // A node name scope for node names which are valid outputs of recomputations.
+  // Inputs to nodes that match this scope may be recomputed (subject either to
+  // manual annotation of those input nodes or to manual annotation and
+  // heuristics depending on memory_optimization), but the nodes themselves will
+  // not be recomputed. This matches any sub-scopes as well, meaning the scope
+  // can appear not just as a top-level scope. For example, if the value is
+  // "gradients/", the default, it will match node name "gradients/foo",
+  // "foo/gradients/bar", but not "foo_gradients/"
+  string memory_optimizer_target_node_name_scope = 6;
+  // Maximum number of milliseconds to spend optimizing a single graph before
+  // timing out. If less than or equal to 0 (default value) the optimizer will
+  // never time out.
+  int64 meta_optimizer_timeout_ms = 20;
+
+  // Configures AutoParallel optimization passes either through the
+  // meta-optimizer or when manually specified through the optimizers field.
+  AutoParallelOptions auto_parallel = 5;
+
+  // If true, any optimization pass failing will cause the MetaOptimizer to
+  // stop with an error. By default - or when set to false, failing passes are
+  // skipped silently.
+  bool fail_on_optimizer_errors = 21;
+
+  ScopedAllocatorOptions scoped_allocator_opts = 16;
+
+  // If non-empty, will use this as an alternative way to specify a list of
+  // optimizations to turn on and the order of the optimizations (replacing the
+  // meta-optimizer).
+  //
+  // Of the RewriterConfig options, only the AutoParallel configuration options
+  // (the auto_parallel field) apply to manually requested optimization passes
+  // ("autoparallel"). Memory optimization passes ("memory") invoked here are
+  // not configurable (in contrast to memory optimization passes through the
+  // meta-optimizer) and act only on manual op annotations.
+  //
+  // Custom optimizers (see custom_optimizers) that are not part of this
+  // schedule will be run after - in the order that they were specified.
+  repeated string optimizers = 100;
+
+  // Message to describe custom graph optimizer and its parameters
+  message CustomGraphOptimizer {
+    string name = 1;
+    map<string, AttrValue> parameter_map = 2;
+  }
+
+  // list of CustomGraphOptimizers to apply.
+  repeated CustomGraphOptimizer custom_optimizers = 200;
+
+  // VerifierConfig specifying the verifiers to be run after every optimizer.
+  VerifierConfig inter_optimizer_verifier_config = 300;
+
+  // VerifierConfig specifying the verifiers to be run at the end, after all
+  // optimizers have run.
+  VerifierConfig post_optimization_verifier_config = 301;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/rpc_options.proto b/src/main/proto/tensorflow/core/protobuf/rpc_options.proto
new file mode 100644
index 0000000..03593a6
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/rpc_options.proto
@@ -0,0 +1,7 @@
+syntax = "proto3";
+
+package tensorflow.dummy;
+
+import public "xla/tsl/protobuf/rpc_options.proto";
+
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
diff --git a/src/main/proto/tensorflow/core/protobuf/saved_object_graph.proto b/src/main/proto/tensorflow/core/protobuf/saved_object_graph.proto
new file mode 100644
index 0000000..a59ad0e
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/saved_object_graph.proto
@@ -0,0 +1,251 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "google/protobuf/any.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+import "tensorflow/core/framework/variable.proto";
+import "tensorflow/core/framework/versions.proto";
+import "tensorflow/core/protobuf/struct.proto";
+import "tensorflow/core/protobuf/trackable_object_graph.proto";
+
+option cc_enable_arenas = true;
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// A SavedObjectGraph is part of object-based SavedModels in TF 2.0. It
+// describes the directed graph of Python objects (or equivalent in other
+// languages) that make up a model, with nodes[0] at the root.
+
+// SavedObjectGraph shares some structure with TrackableObjectGraph, but
+// SavedObjectGraph belongs to the MetaGraph and contains pointers to functions
+// and type information, while TrackableObjectGraph lives in the checkpoint
+// and contains pointers only to variable values.
+
+message SavedObjectGraph {
+  // Flattened list of objects in the object graph.
+  //
+  // The position of the object in this list indicates its id.
+  // Nodes[0] is considered the root node.
+  repeated SavedObject nodes = 1;
+
+  // Information about captures and output structures in concrete functions.
+  // Referenced from SavedBareConcreteFunction and SavedFunction.
+  map<string, SavedConcreteFunction> concrete_functions = 2;
+}
+
+message SavedObject {
+  // Objects which this object depends on: named edges in the dependency
+  // graph.
+  //
+  // Note: All kinds of SavedObject may have children, except
+  // "constant" and "captured_tensor".
+  repeated TrackableObjectGraph.TrackableObject.ObjectReference children = 1;
+
+  // Ordered list of dependencies that must be loaded before this object.
+  // SavedModel loads with the bottom-up approach, by first creating all objects
+  // (in the order defined by the dependencies), then connecting the edges.
+  repeated TrackableObjectGraph.TrackableObject.ObjectReference dependencies =
+      15;
+
+  // Removed when forking SavedObject from TrackableObjectGraph.
+  reserved "attributes";
+  reserved 2;
+
+  // Slot variables owned by this object. This describes the three-way
+  // (optimizer, variable, slot variable) relationship; none of the three
+  // depend on the others directly.
+  //
+  // Note: currently only valid if kind == "user_object".
+  repeated TrackableObjectGraph.TrackableObject.SlotVariableReference
+      slot_variables = 3;
+
+  oneof kind {
+    SavedUserObject user_object = 4;
+    SavedAsset asset = 5;
+    SavedFunction function = 6;
+    SavedVariable variable = 7;
+    SavedBareConcreteFunction bare_concrete_function = 8;
+    SavedConstant constant = 9;
+    SavedResource resource = 10;
+    CapturedTensor captured_tensor = 12;
+  }
+
+  // Stores the functions used to save and restore this object. At most one of
+  // `saveable_objects` or `registered_saver` is defined for each SavedObject.
+  // See the comment below for the difference between SaveableObject and
+  // registered savers.
+  map<string, SaveableObject> saveable_objects = 11;
+
+  // The fields below are filled when the user serializes a registered Trackable
+  // class or an object with a registered saver function.
+  //
+  // Registered classes may save additional metadata and supersede the
+  // default loading process where nodes are recreated from the proto.
+  // If the registered class cannot be found, then the object will load as one
+  // one of the default trackable objects: Autotrackable (a class similar to
+  // tf.Module), tf.function, or tf.Variable.
+  //
+  // Unlike SaveableObjects, which store the functions for saving and restoring
+  // from tensors, registered savers allow Trackables to write checkpoint shards
+  // directly (e.g. for performance or coordination reasons).
+  // *All registered savers must be available when loading the SavedModel.*
+
+  // The name of the registered class of the form "{package}.{class_name}".
+  // This field is used to search for the registered class at loading time.
+  string registered_name = 13;
+  // The user-generated proto storing metadata for this object, to be passed to
+  // the registered class's _deserialize_from_proto method when this object is
+  // loaded from the SavedModel.
+  google.protobuf.Any serialized_user_proto = 14;
+
+  // String name of the registered saver. At most one of `saveable_objects` or
+  // `registered_saver` is defined for each SavedObject.
+  string registered_saver = 16;
+}
+
+// A SavedUserObject is an object (in the object-oriented language of the
+// TensorFlow program) of some user- or framework-defined class other than
+// those handled specifically by the other kinds of SavedObjects.
+//
+// This object cannot be evaluated as a tensor, and therefore cannot be bound
+// to an input of a function.
+message SavedUserObject {
+  // Corresponds to a registration of the type to use in the loading program.
+  string identifier = 1;
+  // Version information from the producer of this SavedUserObject.
+  VersionDef version = 2;
+  // Metadata for deserializing this object.
+  //
+  // Deprecated! At the time of deprecation, Keras was the only user of this
+  // field, and its saving and loading code will be updated shortly.
+  // Please save your application-specific metadata to a separate file.
+  string metadata = 3 [deprecated = true];
+}
+
+// A SavedAsset points to an asset in the MetaGraph.
+//
+// When bound to a function this object evaluates to a tensor with the absolute
+// filename. Users should not depend on a particular part of the filename to
+// remain stable (e.g. basename could be changed).
+message SavedAsset {
+  // Index into `MetaGraphDef.asset_file_def[]` that describes the Asset.
+  //
+  // Only the field `AssetFileDef.filename` is used. Other fields, such as
+  // `AssetFileDef.tensor_info`, MUST be ignored.
+  int32 asset_file_def_index = 1;
+}
+
+// A function with multiple signatures, possibly with non-Tensor arguments.
+message SavedFunction {
+  repeated string concrete_functions = 1;
+  FunctionSpec function_spec = 2;
+}
+
+message CapturedTensor {
+  // Name of captured tensor
+  string name = 1;
+
+  // Name of concrete function which contains the computed graph tensor.
+  string concrete_function = 2;
+}
+
+// Stores low-level information about a concrete function. Referenced in either
+// a SavedFunction or a SavedBareConcreteFunction.
+message SavedConcreteFunction {
+  repeated int32 bound_inputs = 2;
+
+  // Input in canonicalized form that was received to create this concrete
+  // function.
+  StructuredValue canonicalized_input_signature = 3;
+  // Output that was the return value of this function after replacing all
+  // Tensors with TensorSpecs. This can be an arbitrary nested function and will
+  // be used to reconstruct the full structure from pure tensors.
+  StructuredValue output_signature = 4;
+}
+
+message SavedBareConcreteFunction {
+  // Identifies a SavedConcreteFunction.
+  string concrete_function_name = 1;
+
+  // A sequence of unique strings, one per Tensor argument.
+  repeated string argument_keywords = 2;
+  // The prefix of `argument_keywords` which may be identified by position.
+  int64 allowed_positional_arguments = 3;
+  // The spec of the function that this ConcreteFunction is traced from. This
+  // allows the ConcreteFunction to be called with nest structure inputs. This
+  // field may not be populated. If this field is absent, the concrete function
+  // can only be called with flat inputs.
+  // TODO(b/169361281): support calling saved ConcreteFunction with structured
+  // inputs in C++ SavedModel API.
+  FunctionSpec function_spec = 4;
+}
+
+message SavedConstant {
+  // An Operation name for a ConstantOp in this SavedObjectGraph's MetaGraph.
+  string operation = 1;
+}
+
+// Represents a Variable that is initialized by loading the contents from the
+// checkpoint.
+message SavedVariable {
+  DataType dtype = 1;
+  TensorShapeProto shape = 2;
+  bool trainable = 3;
+  VariableSynchronization synchronization = 4;
+  VariableAggregation aggregation = 5;
+  string name = 6;
+  string device = 7;
+  // List of component variables for a distributed variable.
+  //
+  // When this field is non-empty, the SavedVariable will be assumed
+  // to be a distributed variable defined by the components listed here.
+  //
+  // This is only supported by experimental loaders at the moment.
+  repeated SavedVariable experimental_distributed_variable_components = 8;
+}
+
+// Represents `FunctionSpec` used in `Function`. This represents a
+// function that has been wrapped as a TensorFlow `Function`.
+message FunctionSpec {
+  // Full arg spec from inspect.getfullargspec().
+  StructuredValue fullargspec = 1;
+  // Whether this represents a class method.
+  bool is_method = 2;
+  // The input signature, if specified.
+  StructuredValue input_signature = 5;
+
+  // Whether the function should be compiled by XLA.
+  //
+  // The public interface to `tf.function` uses an optional boolean to
+  // represent three distinct states for this field.  Unfortunately, proto3
+  // removes the ability to explicitly check for the presence or absence of a
+  // field, so we instead map to an enum.
+  //
+  // See `tf.function` for details.
+  enum JitCompile {
+    DEFAULT = 0;
+    ON = 1;
+    OFF = 2;
+  }
+  JitCompile jit_compile = 6;
+
+  reserved 3, 4;
+}
+
+// A SavedResource represents a TF object that holds state during its lifetime.
+// An object of this type can have a reference to a create_resource() and an
+// initialize() function.
+message SavedResource {
+  // A device specification indicating a required placement for the resource
+  // creation function, e.g. "CPU". An empty string allows the user to select a
+  // device.
+  string device = 1;
+}
+
+message SaveableObject {
+  // Node ids of concrete functions for saving and loading from a checkpoint.
+  // These functions save and restore directly from tensors.
+  int32 save_function = 2;
+  int32 restore_function = 3;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/saver.proto b/src/main/proto/tensorflow/core/protobuf/saver.proto
new file mode 100644
index 0000000..208468b
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/saver.proto
@@ -0,0 +1,48 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "SaverProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.util";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// Protocol buffer representing the configuration of a Saver.
+message SaverDef {
+  // The name of the tensor in which to specify the filename when saving or
+  // restoring a model checkpoint.
+  string filename_tensor_name = 1;
+
+  // The operation to run when saving a model checkpoint.
+  string save_tensor_name = 2;
+
+  // The operation to run when restoring a model checkpoint.
+  string restore_op_name = 3;
+
+  // Maximum number of checkpoints to keep.  If 0, no checkpoints are deleted.
+  int32 max_to_keep = 4;
+
+  // Shard the save files, one per device that has Variable nodes.
+  bool sharded = 5;
+
+  // How often to keep an additional checkpoint. If not specified, only the last
+  // "max_to_keep" checkpoints are kept; if specified, in addition to keeping
+  // the last "max_to_keep" checkpoints, an additional checkpoint will be kept
+  // for every n hours of training.
+  float keep_checkpoint_every_n_hours = 6;
+
+  // A version number that identifies a different on-disk checkpoint format.
+  // Usually, each subclass of BaseSaverBuilder works with a particular
+  // version/format.  However, it is possible that the same builder may be
+  // upgraded to support a newer checkpoint format in the future.
+  enum CheckpointFormatVersion {
+    // Internal legacy format.
+    LEGACY = 0;
+    // Deprecated format: tf.Saver() which works with tensorflow::table::Table.
+    V1 = 1;
+    // Current format: more efficient.
+    V2 = 2;
+  }
+  CheckpointFormatVersion version = 7;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/struct.proto b/src/main/proto/tensorflow/core/protobuf/struct.proto
new file mode 100644
index 0000000..019382b
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/struct.proto
@@ -0,0 +1,164 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "tensorflow/core/framework/tensor.proto";
+import "tensorflow/core/framework/tensor_shape.proto";
+import "tensorflow/core/framework/types.proto";
+
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// `StructuredValue` represents a dynamically typed value representing various
+// data structures that are inspired by Python data structures typically used in
+// TensorFlow functions as inputs and outputs.
+//
+// For example when saving a Layer there may be a `training` argument. If the
+// user passes a boolean True/False, that switches between two concrete
+// TensorFlow functions. In order to switch between them in the same way after
+// loading the SavedModel, we need to represent "True" and "False".
+//
+// A more advanced example might be a function which takes a list of
+// dictionaries mapping from strings to Tensors. In order to map from
+// user-specified arguments `[{"a": tf.constant(1.)}, {"q": tf.constant(3.)}]`
+// after load to the right saved TensorFlow function, we need to represent the
+// nested structure and the strings, recording that we have a trace for anything
+// matching `[{"a": tf.TensorSpec(None, tf.float32)}, {"q": tf.TensorSpec([],
+// tf.float64)}]` as an example.
+//
+// Likewise functions may return nested structures of Tensors, for example
+// returning a dictionary mapping from strings to Tensors. In order for the
+// loaded function to return the same structure we need to serialize it.
+//
+// This is an ergonomic aid for working with loaded SavedModels, not a promise
+// to serialize all possible function signatures. For example we do not expect
+// to pickle generic Python objects, and ideally we'd stay language-agnostic.
+message StructuredValue {
+  // The kind of value.
+  oneof kind {
+    // Represents None.
+    NoneValue none_value = 1;
+
+    // Represents a double-precision floating-point value (a Python `float`).
+    double float64_value = 11;
+    // Represents a signed integer value, limited to 64 bits.
+    // Larger values from Python's arbitrary-precision integers are unsupported.
+    sint64 int64_value = 12;
+    // Represents a string of Unicode characters stored in a Python `str`.
+    // In Python 3, this is exactly what type `str` is.
+    // In Python 2, this is the UTF-8 encoding of the characters.
+    // For strings with ASCII characters only (as often used in TensorFlow code)
+    // there is effectively no difference between the language versions.
+    // The obsolescent `unicode` type of Python 2 is not supported here.
+    string string_value = 13;
+    // Represents a boolean value.
+    bool bool_value = 14;
+
+    // Represents a TensorShape.
+    tensorflow.TensorShapeProto tensor_shape_value = 31;
+    // Represents an enum value for dtype.
+    tensorflow.DataType tensor_dtype_value = 32;
+    // Represents a value for tf.TensorSpec.
+    TensorSpecProto tensor_spec_value = 33;
+    // Represents a value for tf.TypeSpec.
+    TypeSpecProto type_spec_value = 34;
+    // Represents a value for tf.BoundedTensorSpec.
+    BoundedTensorSpecProto bounded_tensor_spec_value = 35;
+
+    // Represents a list of `Value`.
+    ListValue list_value = 51;
+    // Represents a tuple of `Value`.
+    TupleValue tuple_value = 52;
+    // Represents a dict `Value`.
+    DictValue dict_value = 53;
+    // Represents Python's namedtuple.
+    NamedTupleValue named_tuple_value = 54;
+    // Represents a value for tf.Tensor.
+    tensorflow.TensorProto tensor_value = 55;
+    // Represents a value for np.ndarray.
+    tensorflow.TensorProto numpy_value = 56;
+  }
+}
+
+// Represents None.
+message NoneValue {}
+
+// Represents a Python list.
+message ListValue {
+  repeated StructuredValue values = 1;
+}
+
+// Represents a Python tuple.
+message TupleValue {
+  repeated StructuredValue values = 1;
+}
+
+// Represents a Python dict keyed by `str`.
+// The comment on Unicode from Value.string_value applies analogously.
+message DictValue {
+  map<string, StructuredValue> fields = 1;
+}
+
+// Represents a (key, value) pair.
+message PairValue {
+  string key = 1;
+  StructuredValue value = 2;
+}
+
+// Represents Python's namedtuple.
+message NamedTupleValue {
+  string name = 1;
+  repeated PairValue values = 2;
+}
+
+// A protobuf to represent tf.TensorSpec.
+message TensorSpecProto {
+  string name = 1;
+  tensorflow.TensorShapeProto shape = 2;
+  tensorflow.DataType dtype = 3;
+}
+
+// A protobuf to represent tf.BoundedTensorSpec.
+message BoundedTensorSpecProto {
+  string name = 1;
+  tensorflow.TensorShapeProto shape = 2;
+  tensorflow.DataType dtype = 3;
+  tensorflow.TensorProto minimum = 4;
+  tensorflow.TensorProto maximum = 5;
+}
+
+// Represents a tf.TypeSpec
+message TypeSpecProto {
+  enum TypeSpecClass {
+    UNKNOWN = 0;
+    SPARSE_TENSOR_SPEC = 1;   // tf.SparseTensorSpec
+    INDEXED_SLICES_SPEC = 2;  // tf.IndexedSlicesSpec
+    RAGGED_TENSOR_SPEC = 3;   // tf.RaggedTensorSpec
+    TENSOR_ARRAY_SPEC = 4;    // tf.TensorArraySpec
+    DATA_DATASET_SPEC = 5;    // tf.data.DatasetSpec
+    DATA_ITERATOR_SPEC = 6;   // IteratorSpec from data/ops/iterator_ops.py
+    OPTIONAL_SPEC = 7;        // tf.OptionalSpec
+    PER_REPLICA_SPEC = 8;     // PerReplicaSpec from distribute/values.py
+    VARIABLE_SPEC = 9;        // tf.VariableSpec
+    ROW_PARTITION_SPEC = 10;  // RowPartitionSpec from ragged/row_partition.py
+    reserved 11;
+    REGISTERED_TYPE_SPEC = 12;  // The type registered as type_spec_class_name.
+    EXTENSION_TYPE_SPEC = 13;   // Subclasses of tf.ExtensionType
+  }
+  TypeSpecClass type_spec_class = 1;
+
+  // The value returned by TypeSpec._serialize().
+  StructuredValue type_state = 2;
+
+  // The name of the TypeSpec class.
+  //  * If type_spec_class == REGISTERED_TYPE_SPEC, the TypeSpec class is
+  //    the one registered under this name. For types registered outside
+  //    core TensorFlow by an add-on library, that library must be loaded
+  //    before this value can be deserialized by nested_structure_coder.
+  //  * If type_spec_class specifies a particular TypeSpec class, this field is
+  //    redundant with the type_spec_class enum, and is only used for error
+  //    reporting in older binaries that do not know the type_spec_class enum.
+  string type_spec_class_name = 3;
+
+  // The number of flat tensor components required by this TypeSpec.
+  int32 num_flat_components = 4;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/trackable_object_graph.proto b/src/main/proto/tensorflow/core/protobuf/trackable_object_graph.proto
new file mode 100644
index 0000000..2fe4fad
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/trackable_object_graph.proto
@@ -0,0 +1,80 @@
+syntax = "proto3";
+
+package tensorflow;
+
+import "google/protobuf/wrappers.proto";
+
+option cc_enable_arenas = true;
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// A TensorBundle addition which saves extra information about the objects which
+// own variables, allowing for more robust checkpoint loading into modified
+// programs.
+
+message TrackableObjectGraph {
+  message TrackableObject {
+    message ObjectReference {
+      // An index into `TrackableObjectGraph.nodes`, indicating the object
+      // being referenced.
+      int32 node_id = 1;
+      // A user-provided name for the edge.
+      string local_name = 2;
+    }
+
+    message SerializedTensor {
+      // A name for the Tensor. Simple variables have only one
+      // `SerializedTensor` named "VARIABLE_VALUE" by convention. This value may
+      // be restored on object creation as an optimization.
+      string name = 1;
+      // The full name of the variable/tensor, if applicable. Used to allow
+      // name-based loading of checkpoints which were saved using an
+      // object-based API. Should match the checkpoint key which would have been
+      // assigned by tf.train.Saver.
+      string full_name = 2;
+      // The generated name of the Tensor in the checkpoint.
+      string checkpoint_key = 3;
+      // Deprecated bool field for optional restore. This field has never been
+      // set to True.
+      reserved "optional_restore";
+      reserved 4;
+    }
+
+    message SlotVariableReference {
+      // An index into `TrackableObjectGraph.nodes`, indicating the
+      // variable object this slot was created for.
+      int32 original_variable_node_id = 1;
+      // The name of the slot (e.g. "m"/"v").
+      string slot_name = 2;
+      // An index into `TrackableObjectGraph.nodes`, indicating the
+      // `Object` with the value of the slot variable.
+      int32 slot_variable_node_id = 3;
+    }
+
+    // Objects which this object depends on.
+    repeated ObjectReference children = 1;
+    // Serialized data specific to this object.
+    repeated SerializedTensor attributes = 2;
+    // Slot variables owned by this object.
+    repeated SlotVariableReference slot_variables = 3;
+
+    // The registered saver used to save this object. If this saver is not
+    // present when loading the checkpoint, then loading will fail.
+    RegisteredSaver registered_saver = 4;
+
+    // Whether this object has checkpoint values or descendants with checkpoint
+    // values. This is computed at save time to avoid traversing the entire
+    // object graph proto when restoring (which also has to traverse the live
+    // object graph).
+    google.protobuf.BoolValue has_checkpoint_values = 5;
+  }
+
+  repeated TrackableObject nodes = 1;
+}
+
+message RegisteredSaver {
+  // The name of the registered saver/restore function.
+  string name = 1;
+
+  // Unique auto-generated name of the object.
+  string object_name = 2;
+}
diff --git a/src/main/proto/tensorflow/core/protobuf/verifier_config.proto b/src/main/proto/tensorflow/core/protobuf/verifier_config.proto
new file mode 100644
index 0000000..21885ff
--- /dev/null
+++ b/src/main/proto/tensorflow/core/protobuf/verifier_config.proto
@@ -0,0 +1,27 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "VerifierConfigProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// The config for graph verifiers.
+message VerifierConfig {
+  enum Toggle {
+    DEFAULT = 0;
+    ON = 1;
+    OFF = 2;
+  }
+
+  // Deadline for completion of all verification i.e. all the Toggle ON
+  // verifiers must complete execution within this time.
+  int64 verification_timeout_in_ms = 1;
+
+  // Perform structural validation on a tensorflow graph. Default is OFF.
+  Toggle structure_verifier = 2;
+
+  // Next tag: 3
+}
diff --git a/src/main/proto/tensorflow_serving/apis/classification.proto b/src/main/proto/tensorflow_serving/apis/classification.proto
new file mode 100644
index 0000000..4de0b7d
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/classification.proto
@@ -0,0 +1,48 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/apis/input.proto";
+import "tensorflow_serving/apis/model.proto";
+
+option cc_enable_arenas = true;
+
+// A single class.
+message Class {
+  // Label or name of the class.
+  string label = 1;
+  // Score for this class (e.g., the probability the item belongs to this
+  // class). As per the proto3 default-value semantics, if the score is missing,
+  // it should be treated as 0.
+  float score = 2;
+}
+
+// List of classes for a single item (tensorflow.Example).
+message Classifications {
+  repeated Class classes = 1;
+}
+
+// Contains one result per input example, in the same order as the input in
+// ClassificationRequest.
+message ClassificationResult {
+  repeated Classifications classifications = 1;
+}
+
+// RPC Interfaces
+
+message ClassificationRequest {
+  // Model Specification. If version is not specified, will use the latest
+  // (numerical) version.
+  ModelSpec model_spec = 1;
+
+  // Input data.
+  tensorflow.serving.Input input = 2;
+}
+
+message ClassificationResponse {
+  // Effective Model Specification used for classification.
+  ModelSpec model_spec = 2;
+
+  // Result of the classification.
+  ClassificationResult result = 1;
+}
diff --git a/src/main/proto/tensorflow_serving/apis/get_model_metadata.proto b/src/main/proto/tensorflow_serving/apis/get_model_metadata.proto
new file mode 100644
index 0000000..5d765d8
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/get_model_metadata.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+import "google/protobuf/any.proto";
+import "tensorflow/core/protobuf/meta_graph.proto";
+import "tensorflow_serving/apis/model.proto";
+
+// Message returned for "signature_def" field.
+message SignatureDefMap {
+  map<string, SignatureDef> signature_def = 1;
+};
+
+message GetModelMetadataRequest {
+  // Model Specification indicating which model we are querying for metadata.
+  // If version is not specified, will use the latest (numerical) version.
+  ModelSpec model_spec = 1;
+  // Metadata fields to get. Currently supported: "signature_def".
+  repeated string metadata_field = 2;
+}
+
+message GetModelMetadataResponse {
+  // Model Specification indicating which model this metadata belongs to.
+  ModelSpec model_spec = 1;
+  // Map of metadata field name to metadata field. The options for metadata
+  // field name are listed in GetModelMetadataRequest. Currently supported:
+  // "signature_def".
+  map<string, google.protobuf.Any> metadata = 2;
+}
diff --git a/src/main/proto/tensorflow_serving/apis/get_model_status.proto b/src/main/proto/tensorflow_serving/apis/get_model_status.proto
new file mode 100644
index 0000000..8881247
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/get_model_status.proto
@@ -0,0 +1,68 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/apis/model.proto";
+import "tensorflow_serving/apis/status.proto";
+
+option cc_enable_arenas = true;
+
+// GetModelStatusRequest contains a ModelSpec indicating the model for which
+// to get status.
+message GetModelStatusRequest {
+  // Model Specification. If version is not specified, information about all
+  // versions of the model will be returned. If a version is specified, the
+  // status of only that version will be returned.
+  ModelSpec model_spec = 1;
+}
+
+// Version number, state, and status for a single version of a model.
+message ModelVersionStatus {
+  // Model version.
+  int64 version = 1;
+
+  // States that map to ManagerState enum in
+  // tensorflow_serving/core/servable_state.h
+  enum State {
+    // Default value.
+    UNKNOWN = 0;
+
+    // The manager is tracking this servable, but has not initiated any action
+    // pertaining to it.
+    START = 10;
+
+    // The manager has decided to load this servable. In particular, checks
+    // around resource availability and other aspects have passed, and the
+    // manager is about to invoke the loader's Load() method.
+    LOADING = 20;
+
+    // The manager has successfully loaded this servable and made it available
+    // for serving (i.e. GetServableHandle(id) will succeed). To avoid races,
+    // this state is not reported until *after* the servable is made
+    // available.
+    AVAILABLE = 30;
+
+    // The manager has decided to make this servable unavailable, and unload
+    // it. To avoid races, this state is reported *before* the servable is
+    // made unavailable.
+    UNLOADING = 40;
+
+    // This servable has reached the end of its journey in the manager. Either
+    // it loaded and ultimately unloaded successfully, or it hit an error at
+    // some point in its lifecycle.
+    END = 50;
+  }
+
+  // Model state.
+  State state = 2;
+
+  // Model status.
+  StatusProto status = 3;
+}
+
+// Response for ModelStatusRequest on successful run.
+message GetModelStatusResponse {
+  // Version number and status information for applicable model version(s).
+  repeated ModelVersionStatus model_version_status = 1
+      [json_name = "model_version_status"];
+}
diff --git a/src/main/proto/tensorflow_serving/apis/inference.proto b/src/main/proto/tensorflow_serving/apis/inference.proto
new file mode 100644
index 0000000..16e85ce
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/inference.proto
@@ -0,0 +1,59 @@
+// This file contains messages for various machine learning inferences
+// such as regression and classification.
+//
+// In many applications more than one type of inference is desired for a single
+// input.  For example, given meteorologic data an application may want to
+// perform a classification to determine if we should expect rain, snow or sun
+// and also perform a regression to predict the temperature.
+// Sharing the single input data between two inference tasks can be accomplished
+// using MultiInferenceRequest and MultiInferenceResponse.
+
+syntax = "proto3";
+
+option cc_enable_arenas = true;
+
+import "tensorflow_serving/apis/classification.proto";
+import "tensorflow_serving/apis/input.proto";
+import "tensorflow_serving/apis/model.proto";
+import "tensorflow_serving/apis/regression.proto";
+
+package tensorflow.serving;
+
+// Inference request such as classification, regression, etc...
+message InferenceTask {
+  // Model Specification. If version is not specified, will use the latest
+  // (numerical) version.
+  // All ModelSpecs in a MultiInferenceRequest must access the same model name.
+  ModelSpec model_spec = 1;
+
+  // Signature's method_name. Should be one of the method names defined in
+  // third_party/tensorflow/python/saved_model/signature_constants.py.
+  // e.g. "tensorflow/serving/classify".
+  string method_name = 2;
+}
+
+// Inference result, matches the type of request or is an error.
+message InferenceResult {
+  ModelSpec model_spec = 1;
+
+  oneof result {
+    ClassificationResult classification_result = 2;
+    RegressionResult regression_result = 3;
+  }
+}
+
+// Inference request containing one or more requests.
+message MultiInferenceRequest {
+  // Inference tasks.
+  repeated InferenceTask tasks = 1;
+
+  // Input data.
+  Input input = 2;
+}
+
+// Inference request containing one or more responses.
+message MultiInferenceResponse {
+  // List of results; one for each InferenceTask in the request, returned in the
+  // same order as the request.
+  repeated InferenceResult results = 1;
+}
diff --git a/src/main/proto/tensorflow_serving/apis/input.proto b/src/main/proto/tensorflow_serving/apis/input.proto
new file mode 100644
index 0000000..e47ff43
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/input.proto
@@ -0,0 +1,82 @@
+// Input used in serving APIs.  Based on the tensorflow.Example family of
+// feature representations.
+
+syntax = "proto3";
+
+option cc_enable_arenas = true;
+
+import "tensorflow/core/example/example.proto";
+
+package tensorflow.serving;
+
+// Specifies one or more fully independent input Examples.
+// See examples at:
+//     https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto
+message ExampleList {
+  repeated tensorflow.Example examples = 1;
+}
+
+// Specifies one or more independent input Examples, with a common context
+// Example.
+//
+// The common use case for context is to cleanly and optimally specify some
+// features that are common across multiple examples.
+//
+// See example below with a search query as the context and multiple restaurants
+// to perform some inference on.
+//
+// context: {
+//   features: {
+//     feature: {
+//       key  : "query"
+//       value: {
+//         bytes_list: {
+//           value: [ "pizza" ]
+//         }
+//       }
+//     }
+//   }
+// }
+// examples: {
+//   features: {
+//     feature: {
+//       key  : "cuisine"
+//       value: {
+//         bytes_list: {
+//           value: [ "Pizzeria" ]
+//         }
+//       }
+//     }
+//   }
+// }
+// examples: {
+//   features: {
+//     feature: {
+//       key  : "cuisine"
+//       value: {
+//         bytes_list: {
+//           value: [ "Taqueria" ]
+//         }
+//       }
+//     }
+//   }
+// }
+//
+// Implementations of ExampleListWithContext merge the context Example into each
+// of the Examples. Note that feature keys must not be duplicated between the
+// Examples and context Example, or the behavior is undefined.
+//
+// See also:
+//     tensorflow/core/example/example.proto
+//     https://developers.google.com/protocol-buffers/docs/proto3#maps
+message ExampleListWithContext {
+  repeated tensorflow.Example examples = 1;
+  tensorflow.Example context = 2;
+}
+
+message Input {
+  oneof kind {
+    ExampleList example_list = 1 [lazy = true];
+    ExampleListWithContext example_list_with_context = 2 [lazy = true];
+  }
+}
diff --git a/src/main/proto/tensorflow_serving/apis/logging.proto b/src/main/proto/tensorflow_serving/apis/logging.proto
new file mode 100644
index 0000000..0f9e876
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/logging.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/apis/model.proto";
+import "tensorflow_serving/config/logging_config.proto";
+
+option cc_enable_arenas = true;
+
+// Metadata logged along with the request logs.
+message LogMetadata {
+  ModelSpec model_spec = 1;
+  SamplingConfig sampling_config = 2;
+  // List of tags used to load the relevant MetaGraphDef from SavedModel.
+  repeated string saved_model_tags = 3;
+  int64 timestamp_secs = 4;        // Seconds since epoch.
+  string dc = 5;                   // Datacenter where the request was logged.
+  string request_origin = 6;       // Request origin identifier.
+  string request_criticality = 7;  // Request QoS.
+}
diff --git a/src/main/proto/tensorflow_serving/apis/model.proto b/src/main/proto/tensorflow_serving/apis/model.proto
new file mode 100644
index 0000000..afd9590
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/model.proto
@@ -0,0 +1,36 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "google/protobuf/wrappers.proto";
+
+option cc_enable_arenas = true;
+
+// Metadata for an inference request such as the model name and version.
+message ModelSpec {
+  // Required servable name.
+  string name = 1;
+
+  // Optional choice of which version of the model to use.
+  //
+  // Expected to be left unset in the common case. Should be specified when
+  // there is a strong version consistency requirement (e.g. when the model
+  // signature changes across versions and requests need to be
+  // version-specific).
+  //
+  // When left unspecified, the system will serve the best available version.
+  // This is typically the latest version, though during version transitions,
+  // notably when serving on a fleet of instances, may be either the previous or
+  // new version.
+  oneof version_choice {
+    // Use this specific version number.
+    google.protobuf.Int64Value version = 2;
+
+    // Use the version associated with the given label.
+    string version_label = 4;
+  }
+
+  // A named signature to evaluate. If unspecified, the default signature will
+  // be used.
+  string signature_name = 3;
+}
diff --git a/src/main/proto/tensorflow_serving/apis/model_management.proto b/src/main/proto/tensorflow_serving/apis/model_management.proto
new file mode 100644
index 0000000..2eba09b
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/model_management.proto
@@ -0,0 +1,25 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/apis/status.proto";
+import "tensorflow_serving/config/model_server_config.proto";
+
+option cc_enable_arenas = true;
+
+message ReloadConfigRequest {
+  ModelServerConfig config = 1;
+  repeated string metric_names = 2;
+}
+
+message ReloadConfigResponse {
+  StatusProto status = 1;
+  repeated Metric metric = 2;
+}
+
+message Metric {
+  string name = 1;
+  oneof value_increase {
+    int64 int64_value_increase = 2;
+  }
+}
diff --git a/src/main/proto/tensorflow_serving/apis/model_service.proto b/src/main/proto/tensorflow_serving/apis/model_service.proto
new file mode 100644
index 0000000..29a3b07
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/model_service.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+
+option cc_enable_arenas = true;
+
+import "tensorflow_serving/apis/get_model_status.proto";
+import "tensorflow_serving/apis/model_management.proto";
+
+package tensorflow.serving;
+
+// ModelService provides methods to query and update the state of the server,
+// e.g. which models/versions are being served.
+service ModelService {
+  // Gets status of model. If the ModelSpec in the request does not specify
+  // version, information about all versions of the model will be returned. If
+  // the ModelSpec in the request does specify a version, the status of only
+  // that version will be returned.
+  rpc GetModelStatus(GetModelStatusRequest) returns (GetModelStatusResponse);
+
+  // Reloads the set of served models. The new config supersedes the old one,
+  // so if a model is omitted from the new config it will be unloaded and no
+  // longer served.
+  rpc HandleReloadConfigRequest(ReloadConfigRequest)
+      returns (ReloadConfigResponse);
+}
diff --git a/src/main/proto/tensorflow_serving/apis/predict.proto b/src/main/proto/tensorflow_serving/apis/predict.proto
new file mode 100644
index 0000000..831785a
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/predict.proto
@@ -0,0 +1,191 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow/core/framework/tensor.proto";
+import "tensorflow_serving/apis/model.proto";
+
+option cc_enable_arenas = true;
+
+// PredictRequest specifies which TensorFlow model to run, as well as
+// how inputs are mapped to tensors and how outputs are filtered before
+// returning to user.
+message PredictRequest {
+  // Model Specification. If version is not specified, will use the latest
+  // (numerical) version.
+  ModelSpec model_spec = 1;
+
+  // Input tensors.
+  // Names of input tensor are alias names. The mapping from aliases to real
+  // input tensor names is stored in the SavedModel export as a prediction
+  // SignatureDef under the 'inputs' field.
+  map<string, TensorProto> inputs = 2;
+
+  // Output filter.
+  // Names specified are alias names. The mapping from aliases to real output
+  // tensor names is stored in the SavedModel export as a prediction
+  // SignatureDef under the 'outputs' field.
+  // Only tensors specified here will be run/fetched and returned, with the
+  // exception that when none is specified, all tensors specified in the
+  // named signature will be run/fetched and returned.
+  repeated string output_filter = 3;
+
+  // Reserved field 4.
+  reserved 4;
+
+  // Options for streaming requests to control how multiple requests/responses
+  // are handled within a single stream.
+  PredictStreamedOptions predict_streamed_options = 5;
+
+  // Client identifier to group requests belonging to a specific entity.
+  // Example entities can be product ids, service names, user ids etc.
+  // Servers can use this to optimize placement, caching and colocation.
+  // TODO(b/329897437): Migrate to client_id in RequestOptions.
+  optional bytes client_id = 6;
+
+  // Options for PredictRequest.
+  message RequestOptions {
+    // Client identifier to group requests belonging to a specific entity.
+    // Example entities can be product ids, service names, user ids etc.
+    // Servers can use this to optimize placement, caching and colocation.
+    optional bytes client_id = 1;
+
+    // Deterministic mode for the request. When specified, model servers will
+    // reduce numeric instability based on different mode selections.
+    enum DeterministicMode {
+      DETERMINISTIC_MODE_UNSPECIFIED = 0;
+
+      // Only supported in disaggregated serving. When set, the request will be
+      // pinned to a fixed decoder slot index that's deterministic across
+      // processes.
+      FIXED_DECODER_SLOT = 1;
+    }
+
+    optional DeterministicMode deterministic_mode = 2;
+
+    // Only supported in disaggregated serving. When set, additional arrays from
+    // prefill will be returned if available.
+    optional bool return_additional_arrays_from_prefill = 3;
+
+    // Returns these stop tokens in response if the model stops at them. The
+    // model may stop at other tokens, but will not return them in the response.
+    repeated int64 return_stoptokens = 4;
+  }
+
+  optional RequestOptions request_options = 7;
+}
+
+// Options only used for streaming requests that control how inputs/outputs are
+// handled in the stream.
+message PredictStreamedOptions {
+  // Request state used to handle splitting of requests.  NONE is the
+  // default when the stream request is not split and used for a single-turn,
+  // single request.
+  //
+  // SPLIT is used when multiple streamed requests are used to generate a
+  // logical request. END_SPLIT should be called for the last split of the
+  // multi-turn request to start the processing of the current turn. NONE can
+  // not be interspersed with SPLIT and END_SPLIT messages.
+  // If another request is sent on the same stream after END_SPLIT, it can be
+  // either SPLIT or END_SPLIT to start accumulating input or trigger the next
+  // model turn respectively.
+  //
+  // Some examples with a mix of request states and the logical request.
+  //
+  // Example 1:
+  //   NONE
+  //
+  // Single turn, single request.
+  //
+  // Example 2:
+  //   END_SPLIT
+  //
+  // Will be treated as a single logical input request for a single turn,
+  // similar to Example 1.
+  //
+  // Example 3:
+  //   SPLIT
+  //   SPLIT
+  //   END_SPLIT
+  //
+  // Will be treated as a single logical input request for a single turn,
+  // similar to Example 1.
+  //
+  // Example 4:
+  //   END_SPLIT
+  //   END_SPLIT
+  //
+  // Will be treated as two logical turn requests (1. END_SPLIT 2. END_SPLIT)
+  //
+  // Example 5:
+  //   SPLIT
+  //   END_SPLIT
+  //   SPLIT
+  //   SPLIT
+  //   END_SPLIT
+  //
+  // Will be treated as two logical turn requests (1. SPLIT, END_SPLIT 2. SPLIT,
+  // SPLIT, END_SPLIT)
+  //
+  // Incorrect Example 1:
+  //   NONE
+  //   END_SPLIT
+  //
+  // Invalid because NONE and END_SPLIT are interspersed.
+  //
+  // Incorrect Example 2:
+  //   SPLIT
+  //   SPLIT
+  //
+  // Invalid because END_SPLIT is never called.
+  //
+  // Incorrect Example 3:
+  //   SPLIT
+  //   NONE
+  //   SPLIT
+  //   END_SPLIT
+  //
+  // Invalid because NONE is interspersed with SPLIT/END_SPLIT.
+
+  enum RequestState {
+    NONE = 0;
+    SPLIT = 1;
+    END_SPLIT = 2;
+  }
+
+  // Request state used to handle segmentation of requests.
+  RequestState request_state = 1;
+
+  // Input tensors split dimensions.
+  // Defines the dimension used to split input tensors specified
+  // in PredictRequest.inputs. The dimension will be used
+  // for concatenation of multiple SPLIT requests.
+  //
+  // For input tensor in PredictRequest.inputs that are not contained in this
+  // map, the tensors from the first SPLIT request will be used.
+  //
+  // For example, with an original input tensor of [[1, 2, 3, 4], [5, 6, 7, 8]].
+  //
+  // For a split dimension of 0 and two requests (SPLIT and END_SPLIT), the
+  // input tensors for request 1 should be [1, 2, 3, 4] and request 2 should
+  // be [5, 6, 7, 8].
+  //
+  // For a split dimension of 1 and two requests (SPLIT and END_SPLIT), the
+  // input tensors for request 1 should be [[1, 2], [5, 6]] and request 2 should
+  // be [[3, 4], [7, 8]].
+  map<string, int32> split_dimensions = 2;
+
+  // If true, there will be a single PredictResponse output.
+  // If false, output can be split into 1 or more PredictResponses.
+  // Value of this field should be the same for all requests in the stream.
+  bool return_single_response = 3;
+}
+
+// Response for PredictRequest on successful run.
+message PredictResponse {
+  // Effective Model Specification used to process PredictRequest.
+  ModelSpec model_spec = 2;
+
+  // Output tensors.
+  map<string, TensorProto> outputs = 1;
+}
diff --git a/src/main/proto/tensorflow_serving/apis/prediction_log.proto b/src/main/proto/tensorflow_serving/apis/prediction_log.proto
new file mode 100644
index 0000000..9deebc7
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/prediction_log.proto
@@ -0,0 +1,55 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/apis/classification.proto";
+import "tensorflow_serving/apis/inference.proto";
+import "tensorflow_serving/apis/logging.proto";
+import "tensorflow_serving/apis/predict.proto";
+import "tensorflow_serving/apis/regression.proto";
+import "tensorflow_serving/apis/session_service.proto";
+
+option cc_enable_arenas = true;
+
+message ClassifyLog {
+  ClassificationRequest request = 1;
+  ClassificationResponse response = 2;
+}
+
+message RegressLog {
+  RegressionRequest request = 1;
+  RegressionResponse response = 2;
+}
+
+message PredictLog {
+  PredictRequest request = 1;
+  PredictResponse response = 2;
+}
+
+message PredictStreamedLog {
+  repeated PredictRequest request = 1;
+  repeated PredictResponse response = 2;
+}
+
+message MultiInferenceLog {
+  MultiInferenceRequest request = 1;
+  MultiInferenceResponse response = 2;
+}
+
+message SessionRunLog {
+  SessionRunRequest request = 1;
+  SessionRunResponse response = 2;
+}
+
+// Logged model inference request.
+message PredictionLog {
+  LogMetadata log_metadata = 1;
+  oneof log_type {
+    ClassifyLog classify_log = 2;
+    RegressLog regress_log = 3;
+    PredictLog predict_log = 6;
+    PredictStreamedLog predict_streamed_log = 7;
+    MultiInferenceLog multi_inference_log = 4;
+    SessionRunLog session_run_log = 5;
+  }
+}
diff --git a/src/main/proto/tensorflow_serving/apis/prediction_service.proto b/src/main/proto/tensorflow_serving/apis/prediction_service.proto
new file mode 100644
index 0000000..44e6554
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/prediction_service.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+import "tensorflow_serving/apis/classification.proto";
+import "tensorflow_serving/apis/get_model_metadata.proto";
+import "tensorflow_serving/apis/inference.proto";
+import "tensorflow_serving/apis/predict.proto";
+import "tensorflow_serving/apis/regression.proto";
+
+// open source marker; do not remove
+// PredictionService provides access to machine-learned models loaded by
+// model_servers.
+service PredictionService {
+  // Classify.
+  rpc Classify(ClassificationRequest) returns (ClassificationResponse);
+
+  // Regress.
+  rpc Regress(RegressionRequest) returns (RegressionResponse);
+
+  // Predict -- provides access to loaded TensorFlow model.
+  rpc Predict(PredictRequest) returns (PredictResponse);
+
+  // MultiInference API for multi-headed models.
+  rpc MultiInference(MultiInferenceRequest) returns (MultiInferenceResponse);
+
+  // GetModelMetadata - provides access to metadata for loaded models.
+  rpc GetModelMetadata(GetModelMetadataRequest)
+      returns (GetModelMetadataResponse);
+}
diff --git a/src/main/proto/tensorflow_serving/apis/regression.proto b/src/main/proto/tensorflow_serving/apis/regression.proto
new file mode 100644
index 0000000..5fb79ba
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/regression.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/apis/input.proto";
+import "tensorflow_serving/apis/model.proto";
+
+option cc_enable_arenas = true;
+
+// Regression result for a single item (tensorflow.Example).
+message Regression {
+  float value = 1;
+}
+
+// Contains one result per input example, in the same order as the input in
+// RegressionRequest.
+message RegressionResult {
+  repeated Regression regressions = 1;
+}
+
+// RPC interfaces.
+
+message RegressionRequest {
+  // Model Specification. If version is not specified, will use the latest
+  // (numerical) version.
+  ModelSpec model_spec = 1;
+
+  // Input data.
+  tensorflow.serving.Input input = 2;
+}
+
+message RegressionResponse {
+  // Effective Model Specification used for regression.
+  ModelSpec model_spec = 2;
+
+  RegressionResult result = 1;
+}
diff --git a/src/main/proto/tensorflow_serving/apis/session_service.proto b/src/main/proto/tensorflow_serving/apis/session_service.proto
new file mode 100644
index 0000000..cf53c54
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/session_service.proto
@@ -0,0 +1,56 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow/core/protobuf/config.proto";
+import "tensorflow/core/protobuf/named_tensor.proto";
+import "tensorflow_serving/apis/model.proto";
+
+option cc_enable_arenas = true;
+
+message SessionRunRequest {
+  // Model Specification. If version is not specified, will use the latest
+  // (numerical) version.
+  ModelSpec model_spec = 1;
+
+  // Tensors to be fed in the step. Each feed is a named tensor.
+  repeated NamedTensorProto feed = 2;
+
+  // Fetches. A list of tensor names. The caller expects a tensor to
+  // be returned for each fetch[i] (see RunResponse.tensor). The
+  // order of specified fetches does not change the execution order.
+  repeated string fetch = 3;
+
+  // Target Nodes. A list of node names. The named nodes will be run
+  // to but their outputs will not be fetched.
+  repeated string target = 4;
+
+  // If true, treat names in feed/fetch/target as alias names rather than actual tensor
+  // names (that appear in the TF graph). Alias names are resolved to actual
+  // names using `SignatureDef` in SavedModel associated with the model.
+  bool tensor_name_is_alias = 6;
+
+  // Options for the run call. **Currently ignored.**
+  RunOptions options = 5;
+}
+
+message SessionRunResponse {
+  // Effective Model Specification used for session run.
+  ModelSpec model_spec = 3;
+
+  // NOTE: The order of the returned tensors may or may not match
+  // the fetch order specified in RunRequest.
+  repeated NamedTensorProto tensor = 1;
+
+  // Returned metadata if requested in the options.
+  RunMetadata metadata = 2;
+}
+
+// SessionService defines a service with which a client can interact to execute
+// Tensorflow model inference. The SessionService::SessionRun method is similar
+// to MasterService::RunStep of Tensorflow, except that all sessions are ready
+// to run, and you request a specific model/session with ModelSpec.
+service SessionService {
+  // Runs inference of a given model.
+  rpc SessionRun(SessionRunRequest) returns (SessionRunResponse);
+}
diff --git a/src/main/proto/tensorflow_serving/apis/status.proto b/src/main/proto/tensorflow_serving/apis/status.proto
new file mode 100644
index 0000000..332d1b7
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/apis/status.proto
@@ -0,0 +1,17 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow/core/protobuf/error_codes.proto";
+
+option cc_enable_arenas = true;
+
+// Status that corresponds to Status in
+// third_party/tensorflow/core/lib/core/status.h.
+message StatusProto {
+  // Error code.
+  error.Code error_code = 1 [json_name = "error_code"];
+
+  // Error message. Will only be set if an error was encountered.
+  string error_message = 2 [json_name = "error_message"];
+}
diff --git a/src/main/proto/tensorflow_serving/config/file_system_storage_path_source.proto b/src/main/proto/tensorflow_serving/config/file_system_storage_path_source.proto
new file mode 100644
index 0000000..6924fe4
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/file_system_storage_path_source.proto
@@ -0,0 +1,83 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+// Config proto for FileSystemStoragePathSource.
+message FileSystemStoragePathSourceConfig {
+  // A policy that dictates which version(s) of a servable should be served.
+  message ServableVersionPolicy {
+    // Serve the latest versions (i.e. the ones with the highest version
+    // numbers), among those found on disk.
+    //
+    // This is the default policy, with the default number of versions as 1.
+    message Latest {
+      // Number of latest versions to serve. (The default is 1.)
+      uint32 num_versions = 1;
+    }
+
+    // Serve all versions found on disk.
+    message All {}
+
+    // Serve a specific version (or set of versions).
+    //
+    // This policy is useful for rolling back to a specific version, or for
+    // canarying a specific version while still serving a separate stable
+    // version.
+    message Specific {
+      // The version numbers to serve.
+      repeated int64 versions = 1;
+    }
+
+    oneof policy_choice {
+      Latest latest = 100;
+      All all = 101;
+      Specific specific = 102;
+    }
+  }
+
+  // A servable name and base path to look for versions of the servable.
+  message ServableToMonitor {
+    // The servable name to supply in aspired-versions callback calls. Child
+    // paths of 'base_path' are considered to be versions of this servable.
+    string servable_name = 1;
+
+    // The path to monitor, i.e. look for child paths of the form base_path/123.
+    string base_path = 2;
+
+    // The policy that determines the number of versions of the servable to be
+    // served at the same time.
+    tensorflow.serving.FileSystemStoragePathSourceConfig.ServableVersionPolicy
+        servable_version_policy = 4;
+
+    reserved 3;  // Legacy version_policy definition.
+  }
+
+  // The servables to monitor for new versions, and aspire.
+  repeated ServableToMonitor servables = 5;
+
+  // How long to wait between file-system polling to look for children of
+  // 'base_path', in seconds.
+  //
+  // If set to zero, filesystem will be polled exactly once. If set to a
+  // negative value (for testing use only), polling will be entirely disabled.
+  int64 file_system_poll_wait_seconds = 3;
+
+  // If true, then FileSystemStoragePathSource::Create() and ::UpdateConfig()
+  // fail if, for any configured servables, the file system doesn't currently
+  // contain at least one version under the base path.
+  // (Otherwise, it will emit a warning and keep pinging the file system to
+  // check for a version to appear later.)
+  // DEPRECATED: Use 'servable_versions_always_present' instead, which includes
+  // this behavior.
+  // TODO(b/30898016): Remove 2019-10-31 or later.
+  bool fail_if_zero_versions_at_startup = 4 [deprecated = true];
+
+  // If true, the servable is always expected to exist on the underlying
+  // filesystem. FileSystemStoragePathSource::Create() and ::UpdateConfig() will
+  // fail if, for any configured servables, the file system doesn't currently
+  // contain at least one version under the base path. In addition, if a polling
+  // loop finds the base path empty, it will not unload existing servables.
+  bool servable_versions_always_present = 6;
+
+  reserved 1, 2;
+}
diff --git a/src/main/proto/tensorflow_serving/config/log_collector_config.proto b/src/main/proto/tensorflow_serving/config/log_collector_config.proto
new file mode 100644
index 0000000..4ce01d3
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/log_collector_config.proto
@@ -0,0 +1,12 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+message LogCollectorConfig {
+  // Identifies the type of the LogCollector we will use to collect these logs.
+  string type = 1;
+
+  // The prefix to use for the filenames of the logs.
+  string filename_prefix = 2;
+}
diff --git a/src/main/proto/tensorflow_serving/config/logging_config.proto b/src/main/proto/tensorflow_serving/config/logging_config.proto
new file mode 100644
index 0000000..31fbe94
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/logging_config.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "tensorflow_serving/config/log_collector_config.proto";
+
+option cc_enable_arenas = true;
+
+message SamplingConfig {
+  // Requests will be logged uniformly at random with this probability.
+  // Valid range: [0, 1.0].
+  double sampling_rate = 1;
+
+  // Attributes of requests that can be optionally sampled.
+  // Note: Enabling more attributes will increase logging storage requirements.
+  enum Attributes {
+    ATTR_DEFAULT = 0x0;
+    ATTR_REQUEST_ORIGIN = 0x1;
+    ATTR_REQUEST_CRITICALITY = 0x2;
+  }
+  // Bitwise OR of above attributes
+  int32 attributes = 2;
+}
+
+// Configuration for logging query/responses.
+message LoggingConfig {
+  LogCollectorConfig log_collector_config = 1;
+  SamplingConfig sampling_config = 2;
+}
diff --git a/src/main/proto/tensorflow_serving/config/model_server_config.proto b/src/main/proto/tensorflow_serving/config/model_server_config.proto
new file mode 100644
index 0000000..cadc2b6
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/model_server_config.proto
@@ -0,0 +1,85 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+
+import "google/protobuf/any.proto";
+import "tensorflow_serving/config/file_system_storage_path_source.proto";
+import "tensorflow_serving/config/logging_config.proto";
+
+option cc_enable_arenas = true;
+
+// The type of model.
+// TODO(b/31336131): DEPRECATED.
+enum ModelType {
+  MODEL_TYPE_UNSPECIFIED = 0 [deprecated = true];
+  TENSORFLOW = 1 [deprecated = true];
+  OTHER = 2 [deprecated = true];
+}
+
+// Common configuration for loading a model being served.
+message ModelConfig {
+  // Name of the model.
+  string name = 1;
+
+  // Base path to the model, excluding the version directory.
+  // E.g. for a model at /foo/bar/my_model/123, where 123 is the version, the
+  // base path is /foo/bar/my_model.
+  //
+  // (This can be changed once a model is in serving, *if* the underlying data
+  // remains the same. Otherwise there are no guarantees about whether the old
+  // or new data will be used for model versions currently loaded.)
+  string base_path = 2;
+
+  // Type of model.
+  // TODO(b/31336131): DEPRECATED. Please use 'model_platform' instead.
+  ModelType model_type = 3 [deprecated = true];
+
+  // Type of model (e.g. "tensorflow").
+  //
+  // (This cannot be changed once a model is in serving.)
+  string model_platform = 4;
+
+  reserved 5, 9;
+
+  // Version policy for the model indicating which version(s) of the model to
+  // load and make available for serving simultaneously.
+  // The default option is to serve only the latest version of the model.
+  //
+  // (This can be changed once a model is in serving.)
+  FileSystemStoragePathSourceConfig.ServableVersionPolicy model_version_policy =
+      7;
+
+  // String labels to associate with versions of the model, allowing inference
+  // queries to refer to versions by label instead of number. Multiple labels
+  // can map to the same version, but not vice-versa.
+  //
+  // An envisioned use-case for these labels is canarying tentative versions.
+  // For example, one can assign labels "stable" and "canary" to two specific
+  // versions. Perhaps initially "stable" is assigned to version 0 and "canary"
+  // to version 1. Once version 1 passes canary, one can shift the "stable"
+  // label to refer to version 1 (at that point both labels map to the same
+  // version -- version 1 -- which is fine). Later once version 2 is ready to
+  // canary one can move the "canary" label to version 2. And so on.
+  map<string, int64> version_labels = 8;
+
+  // Configures logging requests and responses, to the model.
+  //
+  // (This can be changed once a model is in serving.)
+  LoggingConfig logging_config = 6;
+}
+
+// Static list of models to be loaded for serving.
+message ModelConfigList {
+  repeated ModelConfig config = 1;
+}
+
+// ModelServer config.
+message ModelServerConfig {
+  // ModelServer takes either a static file-based model config list or an Any
+  // proto representing custom model config that is fetched dynamically at
+  // runtime (through network RPC, custom service, etc.).
+  oneof config {
+    ModelConfigList model_config_list = 1;
+    google.protobuf.Any custom_model_config = 2;
+  }
+}
diff --git a/src/main/proto/tensorflow_serving/config/monitoring_config.proto b/src/main/proto/tensorflow_serving/config/monitoring_config.proto
new file mode 100644
index 0000000..9da3700
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/monitoring_config.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+// Configuration for Prometheus monitoring.
+message PrometheusConfig {
+  // Whether to expose Prometheus metrics.
+  bool enable = 1;
+
+  // The endpoint to expose Prometheus metrics.
+  // If not specified, PrometheusExporter::kPrometheusPath value is used.
+  string path = 2;
+}
+
+// Configuration for monitoring.
+message MonitoringConfig {
+  PrometheusConfig prometheus_config = 1;
+}
diff --git a/src/main/proto/tensorflow_serving/config/platform_config.proto b/src/main/proto/tensorflow_serving/config/platform_config.proto
new file mode 100644
index 0000000..4e506b3
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/platform_config.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+import "google/protobuf/any.proto";
+
+// Configuration for a servable platform e.g. tensorflow or other ML systems.
+message PlatformConfig {
+  // The config proto for a SourceAdapter in the StoragePathSourceAdapter
+  // registry.
+  google.protobuf.Any source_adapter_config = 1;
+};
+
+message PlatformConfigMap {
+  // A map from a platform name to a platform config. The platform name is used
+  // in ModelConfig.model_platform.
+  map<string, PlatformConfig> platform_configs = 1;
+};
diff --git a/src/main/proto/tensorflow_serving/config/ssl_config.proto b/src/main/proto/tensorflow_serving/config/ssl_config.proto
new file mode 100644
index 0000000..0e51cd6
--- /dev/null
+++ b/src/main/proto/tensorflow_serving/config/ssl_config.proto
@@ -0,0 +1,16 @@
+syntax = "proto3";
+
+package tensorflow.serving;
+option cc_enable_arenas = true;
+
+// Configuration for a secure gRPC channel
+message SSLConfig {
+  // private server key for SSL
+  string server_key = 1;
+  // public server certificate
+  string server_cert = 2;
+  //  custom certificate authority
+  string custom_ca = 3;
+  // valid client certificate required?
+  bool client_verify = 4;
+};
diff --git a/src/main/proto/xla/tsl/protobuf/coordination_config.proto b/src/main/proto/xla/tsl/protobuf/coordination_config.proto
new file mode 100644
index 0000000..645c992
--- /dev/null
+++ b/src/main/proto/xla/tsl/protobuf/coordination_config.proto
@@ -0,0 +1,78 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto";
+
+// Represents a job type and the number of tasks under this job.
+// For example, ("worker", 20) implies that there will be 20 worker tasks.
+message CoordinatedJob {
+  string name = 1;
+  int32 num_tasks = 2;
+}
+
+// Coordination service configuration parameters.
+// The system picks appropriate values for fields that are not set.
+message CoordinationServiceConfig {
+  // Type of coordination service implementation to enable.
+  // For example, setting the service type as "standalone" starts a service
+  // instance on the leader task to provide the coordination services such as
+  // heartbeats and consistent key-value store.
+  string service_type = 1;
+
+  // Address where the coordination service instance is hosted.
+  string service_leader = 2;
+
+  // Whether to enable the health check mechanism.
+  bool enable_health_check = 3;
+
+  // Maximum wait time for all members in the cluster to be registered.
+  int64 cluster_register_timeout_in_ms = 4;
+
+  // Denotes if we should synchronize the agents' register attempts by blocking
+  // on a barrier. This is useful for synchronized restarts.
+  bool cluster_register_with_barrier = 14;
+
+  // Heartbeat timeout, if a task does not record heartbeat in this time
+  // window, it will be considered disconnected.
+  // Note: This is also used as a grace period to accept any heartbeats after
+  // the agent has disconnected, to account for the lag time between the service
+  // recording the state change and the agent stopping heartbeats.
+  int64 heartbeat_timeout_in_ms = 5;
+
+  // The list of `CoordinatedJob`s that will register in coordination service.
+  reserved 6;
+  repeated CoordinatedJob coordinated_job_list = 10;
+
+  // Denotes how long to wait for all coordination agents to reach the barriers
+  // (after the first shutdown request) before disconnecting together. If
+  // set to 0, no barrier is imposed upon shutdown and each worker can
+  // disconnect individually.
+  int64 shutdown_barrier_timeout_in_ms = 7;
+
+  // If set, agents do not make an explicit Shutdown() call. Service will only
+  // find out about the disconnected agent via stale heartbeats. Used for
+  // testing.
+  bool agent_destruction_without_shutdown = 8;
+
+  // The list of jobs which are recoverable. If a task in this list fails,
+  // it will not propagate error to other tasks.
+  // If empty, no jobs will be recoverable and every task failure will cause
+  // error propagation to other tasks.
+  repeated string recoverable_jobs = 9;
+
+  // If a task restarts with a new incarnation, we may allow it to reconnect
+  // silently. This is useful when we know that a task can immediately resume
+  // work upon re-connecting to the service.
+  bool allow_new_incarnation_to_reconnect = 11;
+
+  // Disables coordination service.
+  // Some libraries enable coordination service by default even if the user did
+  // not specify any config. This field allows users to explicitly disable
+  // coordination service under all situations.
+  bool force_disable = 12;
+
+  // Use long polling to get error from coordination service as the error
+  // propagation mechanism.
+  bool poll_for_error_from_service_at_startup = 13;
+}
diff --git a/src/main/proto/xla/tsl/protobuf/error_codes.proto b/src/main/proto/xla/tsl/protobuf/error_codes.proto
new file mode 100644
index 0000000..c873d55
--- /dev/null
+++ b/src/main/proto/xla/tsl/protobuf/error_codes.proto
@@ -0,0 +1,155 @@
+syntax = "proto3";
+
+// TODO(b/247876220): Change package and java_package once we figure out how to
+// migrate.
+
+package tensorflow.error;
+
+option cc_enable_arenas = true;
+option java_outer_classname = "ErrorCodesProtos";
+option java_multiple_files = true;
+option java_package = "org.tensorflow.framework";
+option go_package = "github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_proto";
+
+// The canonical error codes for TensorFlow APIs.
+//
+// Warnings:
+//
+// -   Do not change any numeric assignments.
+// -   Changes to this list should only be made if there is a compelling
+//     need that can't be satisfied in another way.  Such changes
+//     must be approved by at least two OWNERS.
+// -   These error codes must match gRPC and protobuf error codes (except for
+//     DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_).
+//
+// Sometimes multiple error codes may apply.  Services should return
+// the most specific error code that applies.  For example, prefer
+// OUT_OF_RANGE over FAILED_PRECONDITION if both codes apply.
+// Similarly prefer NOT_FOUND or ALREADY_EXISTS over FAILED_PRECONDITION.
+enum Code {
+  // Not an error; returned on success
+  OK = 0;
+
+  // The operation was cancelled (typically by the caller).
+  CANCELLED = 1;
+
+  // Unknown error.  An example of where this error may be returned is
+  // if a Status value received from another address space belongs to
+  // an error-space that is not known in this address space.  Also
+  // errors raised by APIs that do not return enough error information
+  // may be converted to this error.
+  UNKNOWN = 2;
+
+  // Client specified an invalid argument.  Note that this differs
+  // from FAILED_PRECONDITION.  INVALID_ARGUMENT indicates arguments
+  // that are problematic regardless of the state of the system
+  // (e.g., a malformed file name).
+  INVALID_ARGUMENT = 3;
+
+  // Deadline expired before operation could complete.  For operations
+  // that change the state of the system, this error may be returned
+  // even if the operation has completed successfully.  For example, a
+  // successful response from a server could have been delayed long
+  // enough for the deadline to expire.
+  DEADLINE_EXCEEDED = 4;
+
+  // Some requested entity (e.g., file or directory) was not found.
+  // For privacy reasons, this code *may* be returned when the client
+  // does not have the access right to the entity.
+  NOT_FOUND = 5;
+
+  // Some entity that we attempted to create (e.g., file or directory)
+  // already exists.
+  ALREADY_EXISTS = 6;
+
+  // The caller does not have permission to execute the specified
+  // operation.  PERMISSION_DENIED must not be used for rejections
+  // caused by exhausting some resource (use RESOURCE_EXHAUSTED
+  // instead for those errors).  PERMISSION_DENIED must not be
+  // used if the caller can not be identified (use UNAUTHENTICATED
+  // instead for those errors).
+  PERMISSION_DENIED = 7;
+
+  // The request does not have valid authentication credentials for the
+  // operation.
+  UNAUTHENTICATED = 16;
+
+  // Some resource has been exhausted, perhaps a per-user quota, or
+  // perhaps the entire file system is out of space.
+  RESOURCE_EXHAUSTED = 8;
+
+  // Operation was rejected because the system is not in a state
+  // required for the operation's execution.  For example, directory
+  // to be deleted may be non-empty, an rmdir operation is applied to
+  // a non-directory, etc.
+  //
+  // A litmus test that may help a service implementor in deciding
+  // between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
+  //  (a) Use UNAVAILABLE if the client can retry just the failing call.
+  //  (b) Use ABORTED if the client should retry at a higher-level
+  //      (e.g., restarting a read-modify-write sequence).
+  //  (c) Use FAILED_PRECONDITION if the client should not retry until
+  //      the system state has been explicitly fixed.  E.g., if an "rmdir"
+  //      fails because the directory is non-empty, FAILED_PRECONDITION
+  //      should be returned since the client should not retry unless
+  //      they have first fixed up the directory by deleting files from it.
+  //  (d) Use FAILED_PRECONDITION if the client performs conditional
+  //      REST Get/Update/Delete on a resource and the resource on the
+  //      server does not match the condition. E.g., conflicting
+  //      read-modify-write on the same resource.
+  FAILED_PRECONDITION = 9;
+
+  // The operation was aborted, typically due to a concurrency issue
+  // like sequencer check failures, transaction aborts, etc.
+  //
+  // See litmus test above for deciding between FAILED_PRECONDITION,
+  // ABORTED, and UNAVAILABLE.
+  ABORTED = 10;
+
+  // Operation tried to iterate past the valid input range.  E.g., seeking or
+  // reading past end of file.
+  //
+  // Unlike INVALID_ARGUMENT, this error indicates a problem that may
+  // be fixed if the system state changes. For example, a 32-bit file
+  // system will generate INVALID_ARGUMENT if asked to read at an
+  // offset that is not in the range [0,2^32-1], but it will generate
+  // OUT_OF_RANGE if asked to read from an offset past the current
+  // file size.
+  //
+  // There is a fair bit of overlap between FAILED_PRECONDITION and
+  // OUT_OF_RANGE.  We recommend using OUT_OF_RANGE (the more specific
+  // error) when it applies so that callers who are iterating through
+  // a space can easily look for an OUT_OF_RANGE error to detect when
+  // they are done.
+  OUT_OF_RANGE = 11;
+
+  // Operation is not implemented or not supported/enabled in this service.
+  UNIMPLEMENTED = 12;
+
+  // Internal errors.  Means some invariant expected by the underlying
+  // system has been broken.  If you see one of these errors,
+  // something is very broken.
+  INTERNAL = 13;
+
+  // The service is currently unavailable.  This is a most likely a
+  // transient condition and may be corrected by retrying with
+  // a backoff.
+  //
+  // See litmus test above for deciding between FAILED_PRECONDITION,
+  // ABORTED, and UNAVAILABLE.
+  UNAVAILABLE = 14;
+
+  // Unrecoverable data loss or corruption.
+  DATA_LOSS = 15;
+
+  // An extra enum entry to prevent people from writing code that
+  // fails to compile when a new code is added.
+  //
+  // Nobody should ever reference this enumeration entry. In particular,
+  // if you write C++ code that switches on this enumeration, add a default:
+  // case instead of a case that mentions this enumeration entry.
+  //
+  // Nobody should rely on the value (currently 20) listed here.  It
+  // may change in the future.
+  DO_NOT_USE_RESERVED_FOR_FUTURE_EXPANSION_USE_DEFAULT_IN_SWITCH_INSTEAD_ = 20;
+}
diff --git a/src/main/proto/xla/tsl/protobuf/rpc_options.proto b/src/main/proto/xla/tsl/protobuf/rpc_options.proto
new file mode 100644
index 0000000..35c5dbe
--- /dev/null
+++ b/src/main/proto/xla/tsl/protobuf/rpc_options.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+
+package tensorflow;
+
+option go_package = "github.com/google/tsl/tsl/go/protobuf/for_core_protos_go_proto";
+
+// RPC options for distributed runtime.
+message RPCOptions {
+  // If true, always use RPC to contact the session target.
+  //
+  // If false (the default option), TensorFlow may use an optimized
+  // transport for client-master communication that avoids the RPC
+  // stack. This option is primarily used for testing the RPC stack.
+  bool use_rpc_for_inprocess_master = 1;
+
+  // The compression algorithm to be used. One of "deflate", "gzip".
+  string compression_algorithm = 2;
+
+  // If compression_algorithm is set, the compression level to be used.
+  // From 0 (no compression), up to 3.
+  int32 compression_level = 3;
+
+  // Setting cache_rpc_response to true will enable sender side caching of
+  // response for RecvTensorAsync and RecvBufAsync to allow receiver to retry
+  // requests. This is only necessary when the network fabric is experiencing a
+  // significant error rate.  Without it we'll fail a step on an network error,
+  // while with it we'll be able to complete long steps (like complex
+  // initializations) in the face of some network errors during RecvTensor.
+  bool cache_rpc_response = 4;
+
+  // Disables TCP connection sharing when opening a new RPC channel.
+  bool disable_session_connection_sharing = 5;
+
+  // Setting num_channels_per_target > 0 allows uses of multiple channels to
+  // communicate to the same target. This can be used to improve the aggregate
+  // throughput on high speed links (e.g 100G) where single connection is not
+  // sufficient to maximize link utilization. Note that a single RPC only goes
+  // on a single channel, this only helps in situations where there are multiple
+  // transfers to the same target overlapping in time.
+  int32 num_channels_per_target = 6;
+}
diff --git a/src/test/java/com/github/tadayosi/tensorflow/serving/client/ConfigurationTest.java b/src/test/java/com/github/tadayosi/tensorflow/serving/client/ConfigurationTest.java
new file mode 100644
index 0000000..b1246fd
--- /dev/null
+++ b/src/test/java/com/github/tadayosi/tensorflow/serving/client/ConfigurationTest.java
@@ -0,0 +1,34 @@
+package com.github.tadayosi.tensorflow.serving.client;
+
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+class ConfigurationTest {
+
+    @AfterEach
+    void cleanSystemProperties() {
+        System.clearProperty("tfsc4j.target");
+        System.clearProperty("tfsc4j.credentials");
+    }
+
+    @Test
+    void testLoad() {
+        var config = Configuration.load();
+        assertNotNull(config);
+    }
+
+    @Test
+    @SuppressWarnings("OptionalGetWithoutIsPresent")
+    void testSystemProperties() {
+        System.setProperty("tfsc4j.target", "dns:///test.com:8501");
+        System.setProperty("tfsc4j.credentials", "aaaaa");
+
+        var config = Configuration.load();
+
+        assertEquals("dns:///test.com:8501", config.getTarget().get());
+        assertEquals("aaaaa", config.getCredentials().get());
+    }
+}
diff --git a/src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClientTest.java b/src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClientTest.java
new file mode 100644
index 0000000..f43498b
--- /dev/null
+++ b/src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingClientTest.java
@@ -0,0 +1,30 @@
+package com.github.tadayosi.tensorflow.serving.client;
+
+import com.google.protobuf.Int64Value;
+import org.junit.jupiter.api.Test;
+import org.testcontainers.junit.jupiter.Testcontainers;
+import tensorflow.serving.GetModelStatus;
+import tensorflow.serving.Model;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+@Testcontainers
+class TensorFlowServingClientTest extends TensorFlowServingTestSupport {
+
+    private static final String DEFAULT_MODEL = "half_plus_two";
+    private static final String DEFAULT_MODEL_VERSION = "123";
+    private static final String TEST_DATA = "src/test/resources/data/kitten.jpg";
+
+    @Test
+    void testGetModelStatus() {
+        var request = GetModelStatus.GetModelStatusRequest.newBuilder()
+            .setModelSpec(Model.ModelSpec.newBuilder()
+                .setName(DEFAULT_MODEL)
+                .setVersion(Int64Value.of(Long.parseLong(DEFAULT_MODEL_VERSION))))
+            .build();
+        var response = client.getModelStatus(request);
+        assertNotNull(response);
+        System.out.println(response);
+    }
+
+}
diff --git a/src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingTestSupport.java b/src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingTestSupport.java
new file mode 100644
index 0000000..667a657
--- /dev/null
+++ b/src/test/java/com/github/tadayosi/tensorflow/serving/client/TensorFlowServingTestSupport.java
@@ -0,0 +1,31 @@
+package com.github.tadayosi.tensorflow.serving.client;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.testcontainers.containers.GenericContainer;
+import org.testcontainers.containers.wait.strategy.Wait;
+import org.testcontainers.images.builder.Transferable;
+import org.testcontainers.junit.jupiter.Container;
+import org.testcontainers.utility.DockerImageName;
+
+class TensorFlowServingTestSupport {
+
+    private static final String IMAGE_NAME = "bitnami/tensorflow-serving";
+
+    @Container
+    @SuppressWarnings("resource")
+    static GenericContainer<?> tensorflowServing = new GenericContainer<>(DockerImageName.parse(IMAGE_NAME))
+        .withExposedPorts(8501)
+        .withCopyToContainer(Transferable.of("testdata/saved_model_half_plus_two_cpu"), "/bitnami/model-data")
+        .withEnv("TENSORFLOW_SERVING_MODEL_NAME", "half_plus_two")
+        .withEnv("TENSORFLOW_SERVING_ENABLE_MONITORING", "yes")
+        .waitingFor(Wait.forListeningPorts(8501));
+
+    protected TensorFlowServingClient client;
+
+    @BeforeEach
+    void setUp() {
+        client = TensorFlowServingClient.builder()
+            .target("localhost:" + tensorflowServing.getMappedPort(8501))
+            .build();
+    }
+}
diff --git a/src/test/resources/data/0.png b/src/test/resources/data/0.png
new file mode 100644
index 0000000000000000000000000000000000000000..a193c47ba45d876f6231d161c76a406da3b9418e
GIT binary patch
literal 272
zcmV+r0q_2aP)<h;3K|Lk000e1NJLTq000~S000~a00000S7v}n0002jNkl<Zc${Nk
z7+7G-tW|cftjrdR6yN5rZ$AA4g0HVbFeIk~#jhUNv}wz~e;@VG<g{P^d*mR)!@$7Q
z@aO+pK~zbLe}A)~%(h?uyiq0X|ILH5@BW*MDtYK%I-EW2-&GVjr91Df;G7UN$+~~5
z8E{HodvAkB@;C!dN$%TtB$NIjOD_6%5vQc_JJdixmNZ`a?=TCpq|-;ZB=horkIhjW
z%W&<g2$<HJaq8bcdj>Sge`mIU!0UhiUd`b`lMFoj4+8$Ys$jqXQkTGTGcz!y=n4SE
WdUQhQ?LY4T0000<MNUMnLSTYD$A1q1

literal 0
HcmV?d00001

diff --git a/src/test/resources/data/1.png b/src/test/resources/data/1.png
new file mode 100644
index 0000000000000000000000000000000000000000..765d4e29a93b46962ca0c986a3a67d58c573e7fd
GIT binary patch
literal 181
zcmeAS@N?(olHy`uVBq!ia0vp^G9b(WBpAZe8a#khji-xaNX4Aw1PK<0xAA&;CqLLb
z?ohpL|7hCfzec><Q%=}d-<h=hUGd{7!9U8Gw;%ga-(Kkb?a$|<n(05k>9G5qob}(k
zaQ3cW@d|#Ho#NLEv)BAEm*4*6OMUGQ)%kyl4{4_VmhMk^zkl<Y9Zz<w`@j0g%86fZ
gu<6}-w@{pcVUoAn1eL?vxIymmboFyt=akR{0L?>BTL1t6

literal 0
HcmV?d00001

diff --git a/src/test/resources/data/kitten.jpg b/src/test/resources/data/kitten.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3353026abb02ee9663bf0fee14c22c11c5640175
GIT binary patch
literal 16016
zcmb8VbyOTdvp&4I!{YAl?(Ul4?(VL^-EDCV?z*@Jcb5<d1c%_kgYzTrd+&Gu{%ZQ{
z%sD;V(_K?FPt{ZXzWTlm04m7H%K#uggb9KP0C-;qNCFUG;o#w55#ZtB5fKrPP;t>v
zQBY8cu&~i_DM%<O$w|n_s2RAKscAXr$jDekSvh$5g@lBtm?dPy1!TAdh4}vs0)dE#
zh>DC#h=xYUPeVq-|NmRw2Lb5tACnq{f}j9EqC-HTL%a_I2mk;`C<q9^{|N*n016rg
z_M;E~-zq-j|M~zBkRN^TKLN;45CBLZ6c7M_*y(WG?`n;dfx-=*$AFNM3+_*rRGDB>
zE0i&`G|9kd$d5g%(J7y-%~`zbDMatV@&%WD&sFQLYp3%idN~nWfwXEe2}hSJ6aQ4*
zDjUH>N|=DagvJC?31(u$kb{J7Y~)C%6NiBb(+!1Ys>_f4+Phn>n}5<&V<P7X`qc7U
zxkgl$*Vt`&=R5x|!n3P$!liz4#T1GqDu5Q`I1~=z&Z8#_Vj~0v$RZqAiwBuJA%nmK
zKvy_{fJu?Vhqa?Ol+voVH(dK8H#w-aC(!<V|J3EwCN#{voi`z=b>!}ghInl%8MwH3
zwV1j!hJd0muN$mus#d80rV360#(V<A0Ez)*7~@op=&)){)!CLF>z}>rUoJ8Upd04*
zygUCaY&iZd`a0RlX_$VsxW#|`(pND_<&*_I4l714jX0>vnw|ockcE;AlptF$u>e71
zlR<<6vEfttsTwD!<18hxb!A*Oo>p@Hj0|`e4l(CD{9-bypT&9oB=jxkR3ymDbgn^v
z$}foc=thme^8lGs2@GGTOJac6tou7>y+A7zS`Lp)E=(GV4k(tUFwWqi*jIr&_p_GR
z);g<UY2F31Z(#ZNk{`F?LW4kqrZ1(pEWXs%?75wOaa{4QtUpRV0vlEJzcX(Gtt$lf
z8~O+_V%#lI#19^q&3GeHXvu4Yo^|XaLd32+^cmT9%o@>=p(zwHe>-fx)V+|jRv2W&
zwk}|_SKuc8Fgw%pMD01B6VPQ<2i1?Q2<p#bIeoRA<M;BW%{XaLo_!+W{p_Ff)7SrC
z54!Gg=_l$HcE(7G;#fw5NNbde93&OFDyn@%WE%lwIXnhRK4E9RF=?40=hbm5hol#=
z?)5;fli})jk5-PGyZdZ;aMO{e%dOze(3s$|{vi7DR7Fu$YHoY#5*~3yAPRbU?R>^5
za=A_obg?`OaM7-%RUu<I9SR@;0iV_n$DJ}5pQuq|KTLhDE%OJhBJDWG<CbVD6(if8
z#i-o3Ju%}Qz_ajuVbcF};n#29UhrYV@mAHq`s!+4f}xP$$fjS##HL%7QP0YL*BF^I
zGDI44sNO+J=V&))A)Zd03BCS`o@FeVD?G8LDFmG&NhT4L>4nQ2^WTT=KbKD(#B2$u
zT8_WI{B^h)BKY%Ta8bL&@yquop_{w8*50n(j-A!(-vVoM^f%pC&irZRH}?C6<|Y$N
zKRDToRYpMMiDxT&W%Gn&6cbd~Oz0}DTCHF+uvs-Mv}K&R*7$+mPz~4K4^3X}-P>zl
z53j#@wAO-LPQPk!V^$rxt#j{$L>_~be+$04H%f02@cVV>TKskxSz?xCRf1sdlx+>S
z=J>%(&2iK-z9(oU(`i$hf`km6BCa-p&0O6|J*h!VQ=+AqQ?6)-PyM|lsbp^5s%ORJ
zR6B^Lr+q7OzxS8`bpE~5tGe#bq_tCEQ%?=1z)X_-$`{sdQUe4H#RHnDGA~6YIiY%8
z6;8ogg((WDCT)h%%8YPIY4|B>Y0Gqsd6jmm#q@eq?P^s=#{>_qYvY2?vzwgPKfIMm
zD{~#E<~NO<S(ObuGO2&gIt19bcD^MA_k0}<Y!G(u2yhY&^yF*S;n7MjVr*n8g9f=N
zH4FBrDV9;SX<{rXv8G8%%_<(Cb0MPW*@eL%W;c}Z6wC0USP}o06|6s8ighBm(5{OA
zL)-dW(CYNc*s^6Q+P7@&du2?nGH-R)SEoAv*_XNn!wg-bXocZOZDgBN-NR(-94hM^
z@~=j%DlO8k%NTTvjrw%3pnR?G_0x+fR&}9w3l&J$N9QSfpEhpty&Xm$^VO%D4E4Y^
z%`F{&J;pebP9Juf3)*Hg@FSbA1o8r?k4uQnR$rd12@X9I9a41>NsDes$Yj&2S6i#h
zR?2EL(!YyoK+*%DknHs_sPB{<YRwi23Az#t9&73hG3uAQzm~_JCm3hTSHH=eo6qya
z_jcZO+&_M@MytFbZe>omsT2C8{QGvvDr&ZpcY-s4ZH#MVTS`>@urY-U2)PX8XUIqi
zabduPEV5yUr^eQ|^31SRItgn!{*rn2J>T8kn`!jXy>@Mj?~YL5!19*f8cJ5r;6_=?
z;77A?=XYyG_g~uFW%lk`G%J+sozj^o3XA(1Ntjc-j(%>ti4S~#R?Sld7LDl7$*1!5
z4))i-9qoSY=QnS%j}vrcC4LGM7;|ciuMp?#Ev<8sw`-b^TVWvcgVt)ST|><2w`v-V
z%~{eMqliOEWA3l|s;23NJw;{Gv0i&%V@dH?W#F2xfgHQ|WSPNS8gZtrrE{_XHHl=3
z(Op9nhnR8@LfsM)9j~KEE|mz+gjGMC4yHG-$ELDJ*Fhk_nqpK(ZvMbU1|R?u5&{|m
z`X6%s7ak!X(V?J$8045_6zpPHY%m<+DwLcOA3zEJ(GLLw`CMxuee5TDB>L?>x|d1f
zCopYpWMb;|)uEy1UMheoI)SBN+*Ku!{)+oS$a%>!vo-S_01+=0fNb$eM3iaSXCTHq
zMMQ~P`MWfh_6u%{Fq(sXTBF&OH0OR=M^@QpJrQTnm|t(vk%b_aULah5@-GZ4(Sm)-
z=666UbJ3CY)#dLLK@KGll>R2^592_6sePvCw7BMniVgHjuQ64kxDt+D-qVIY^5#^2
z9zk|mPLkZ1ieYe8sc3A8Ac!PhlTsCF=&AVIful7`{klZFG}c!pw(MNwmPfXO`0=X=
zc!g*S%Qd>{W%0789q%M2pZ(kRE@NZZq!c|S<gcNypHSse!b{rwk5e6ynv&iD>`myY
zSUq3(tCEL{QhO|QQKN5JRywaD(Xu9svact4^V;PUPi-F_zpzgxnvV;W>`s`Et~4Nx
zn)}Bz;9bzLdT7tmcYx9?ZZ?ecub<t-J=_wmDUPO4?GQ`f0S{CAZbWT$DHyMrPnQBy
zte|v8+miHV%KJ-f4e(mxXQ;7#)eGOijf3$e)>n?Hfs{w{!CWE%UGsQdO3q@Xx${&%
z<^}BTC$Ptx8p!4mUrxh9OM$72%sPq!pE~*-Fe_y`Mr6#y=i<)oYb=7o+D#&4#Mi5K
zZQr8H@kx;<dv|7EaZj&Ze<Yw5qh(=y-^<mZ)bhx_UMT{tm|YjVcYsjoHk3q6S5E~4
zzxy>n>CSCoccdcH;EJCqQi+kMasRN@v5k2V_h)=Zx0;eul3?u31ZZD5)+s6FCzh6+
zsWVBro=-bk^w6&e{n6K-rFS`Rd>&bD)^3#4l*%|MBla{bu-2O^<N?!|<xiv*pjaG#
zS$eF<o~ZDFM7fEw_;RqHB`!WCVX<u%r^QH>`$UGC;h;um%U=FQo+Z)Z`6P8d2jd?}
z?nG{)45oTC5}O$dgWMz?Uk*E0v7~FPKrLG&p0<Np92xUeEBYp%F9%l)iSXl*@WWT5
z^xI*y_)?SEd)D~OnZ;8~t@m7y{G%?MyIk*ppU-poGJLfB7OD|nuEg}x0$mF-3>I<^
z5K5Uf)Rzu!>^N-qf5#H><QXJ_Dnay<8`%zOOwFM2rYR=nAJT!(HbYhW%1vqV0=mZZ
zd;TWB$)&e?hM5BPhxeOy=Hf@^_I8Gl<p1z|c9?Q=i(hahSO<;wsRdbKIEuvgEn%_v
zQ_0Gj4<VZ$B#$MC%YIc?hDp#i9yg8Tv+-O=iIiq8kb359vCNgF5isxQZVFGN#?med
z2tRJi7LAv8i^8L~OEb$blmeT%7@$&7>f4(>ax5OR_a{=dIZUuLDI5q&-k)DnQS@?Y
z|8&s(tOed;YO35BzUC7p2=fb4<B*%;r6dt4B3R3?GLI!_a@nScYR%BKJP$G(of!Y{
zhjsGx^zk8A`QiBJ1Ikp;gn~SLl8wU@`;9e@{9l74Vtf&GR$oTHpWj+j%+CT3yHqlb
zK7Zzj!AUw>Rsms&Y0yMY*CnD#r4?Gu@m2DFR*A_i<6RAJYZ`uKU9!y34FZFk=0dzh
z)(wG_BFjf=wN8e+Qr9(M2YBaHu7sL3dz7DIV!5mhC%h=81OsFFiQ+c@e(3$9^{G?c
z%T6VIj;^77^|(r6%ImC19hlm<Za_r@p*hhZr&a0}TLqr!ICjO*V}x5+4suHuP?@)4
z_fh=k@Bba--|wjK*R3<nEbw2tDrGEFqNIeKow3y)Nu<bno^VjjpowBz`RjOT7Re$G
zt=s@RN{KUxk9k{DKmVIV)2u8dXx!~rZ*jCVdf<IFiS19<5ej~d_%F)iw#<sO)j2-b
z^06@z(+>}+%MS-B?*P(I8T_VldBm0pDWw;~vzAS3S}uHfpW&cF`nUgG&|<_g{_s=P
zS1EK&pea^(WGR;6qWAeeX%^MlL}Zq>UlK(NqF)=tr)Zhi?Ms<lD-0cqGt3=B;VD<2
zsLk6w1>t>+fld+obZXu7lVPIC7&){Zk3zA>Q!Rx=9++lL<i^1eNvvd1gx9}(7%xjN
zEQ4I%DsuJ{cYmg8JXcO`mo86%Za3~fi~CUW^2*;P%Q)~I={@5X%f~0iu#@z3;@5;l
z_);Ql&PwuRkKRaz%va3t3d_%KNn9Q1F=JhP9|P3Y_N_@{_d4sZtS+Q{F6`FG^>Q;$
z{pMC>kw3<rY2D>G`N?!e5ntG#NoE}b14|~67G0`{B9^YC!O++KcHulmi?3}3pYzI6
zQIXuXqaeY0Z@#f)kePp{WH6AydRID57ONyOs{GRf-Ht@`u2kJmy1G+eAzd`AIjlcp
zf{{3T57(ju7A<FbR?J+fdRjg<m);M*0vU1A3*uyVbswyEORfr&pwS3~%tHByKGSW-
zP}C~KGFl>K4oE~`FAOVo(G|BBN47~jJ^uvBJ&?>`vC~p?zXNPChZXqpq!M9>q}mm|
zdr5ccg6NU!PAy_L+x)cV{Lrab6B#W&`Dx475S%d8;7WVT4%~DxdUgGb)b`U_X8q&<
z(%1TaYW*PX^|Xe5TjY&>J^s>g7Ii~faOL>cyE1Nm7nCb1vGsB*D!KIm=M?Dwf%AV@
z{=b<n1P~n(gN&RF3X@$-Rpmc0{?RQ8aq0)$8Pw8r>>G}cz+yq}B;9w3q*EyF;Cz0V
z-$6mZtCtl~?rvyBmeWb+mtD3hE<W@3EVBytT{t04C^BPFh(N)6;f&n-ZT8%nW1o8F
zzm`0l6c+GJs3Ar&w!>8wB*76dG2hgovZ0x6Lf$lnkEJT@Wtxu2vQdxRNm#rylKH?k
zS>(zbcf4PhNLL(tF-GptMbx6buhcY_ykWq8uWE>mr7DvWOF+P250=oUThuUUARISB
zJL=)fVSmGFTe)KmK{9t$G7^e+UD^bdE{=o=d-=}L`IIP<#@`iD&-d8em8>Md&nMu0
z?NK(OiT^}bKlKj4{Lm83|7z(YocSL$eFQx~C=7CTOg1qxRV)e)ant{33i6+(?iz-0
za0nHfJ@0P7Xt+{+-AzRPHOP{~`MZzgKSoE=Mkl61`Ir3t9VE>`mhp)KTw4e3r>Ui1
zbPufWpgmKGq6U0?lt2}+V}TPa9GA7f{5fmASQLf@an$ZlCGy`WL{Qd!uxeu#%*b@c
zYmJ*!H!4$H&&~oes8)C9uE;s$<c<>2Ei;UpwFp6ey0li`@p2VPJTE2au`SIQv9t%O
z%1gogL|`zwMzkdoj;`f%1=>BgLuwDX8&kOv<F+|9rfG``My<+x#5Io-x(%4qwZlQp
zjCFI95MM*pYWJ#LaFL|_QrMP6#yKeWmHkjCAt7FFT|%dYe>kmS%Q83lFo{Aod4v8P
zFytX!8CCUy(#E7JZ5CBM$0!tk%{f6iK{@qtOi=m}26izn=?2MusF?dBu7rSsgn@#D
zhC=wSjv>$?$=QJzWa28Srce|dVj-Ap5(Ch#AWBY2H50QU@WTIun6RRd?*LUlRg1Rz
zhOUMfC+V5nxB42Q2nDi~YTshHHeCUU*`z8B=P$dOp0oo<8c*rI@~bzcsJ`L2J=t?G
z>2aCNOTU;rsn3N{nq;Amp$sAIUEGCRN!ByVPS=tj9Ms>m9BY!dgJwsfaT?{eek{>9
zP4^~&Qai6?5VT9z)zaSqvc9?)lH|WTmd4u|f$5o?SM<-(jxlvsMYq^P`KH^A2MULd
z+QwyXV<F!xFm40$wdpHBRK3Dy&Cj;P5bWGz`TiK4nOeN{!xxQPt1q9uu>6muJ^E76
zAB^%(4xNiDUZP)x=Q2{~c`mm?|1z78nUzi3;_?*9q>Jp`ibjl`+#v;=Eb^@Wl8-*(
zP=@vB?h9ScC{|K`Ri=`W)+0ZPOd2S!XiC2bb3{5bc<7|5AVJD|!r8<V#JKb_j3cK7
zI_1~KtPE8bfKGbbYpB4yDHMk+(kd;3Y$jtr&THhCX4^$C+so92zop9#4wfueIQm+D
zT5z+Ij<<Kf{kTUg(O*RAS@GBoew88H?E<>0lO(4=p0SVWB9xc6+(F;qmR?FT-&zTu
z7BqcRac`&F)umv!&!Rq(Q^}FNdbQ9fA+<dH4wyiYSgf|yBF8;+x?NeI`7;D_>7*_D
zXW{wSY=+94MQ<GJs!{;PV;-2ki1~<h?Rk1?J>(Ww7azzePL-)=TCQY3r5MVR`Mhot
zLG!aI6KuM!R`>xN-p7d5Z_BAP9y4*aPJ|?T7$)B`kABd~4+s`f>E~jSfyz>R>QZiH
z6*c}+vLMmN;k`=Xm<Nn+PjIcJ8Y`+b3FOjyO}rjt9UGNSTu81@Cv5EdJ}gmpur8EN
zfP_jV1yj<e3ow>JpXU6J-PEv!UI@WNBW(w>BirFm#A~UP_HsgCDnqImPgEF-3GK_z
z@LrsBUjxRD8C5d7=R)ZE*rkj?4<Z$-5s&y!>&dZFF;uzu`7}fr_0%DnpE!nV$`*{R
z6o^vOg=V_tgLbYjv|ps{sMpX+Hu5QrsUJxuEy^m>x1^30C(YN@tj`pSntoLJ9ZQ*)
zZV!YZ54fpvy(WCCj{PxO_MEo|i@Whn?#VCEn#SaznP5x6WHfVoz>#|E$jw{{R~F>W
zPn4NTqnx~Qmd&i1O5<cyY;7FNUd}zX|GD|m{Sj|_3@#BtPyeLhR)-Xp?<GL`TiGka
z(>vh9f9Ze#s1GRlkdG78e;gPB{lk65RZYoU*+M`?V6laPoy-5t3Ps7~BpynsFZ&|m
zgP_?XA6R$vxo{`p4ZxT4c|~sF;eCudN!w8)@uK6Mn2@BUE4-3~hEpU{*&_!Xydw`t
zY2lrHA#Pi&+a3PKlm5KhKO#xj@-$LRhBt2+XhaJ$kxTsX%#uRWB*>vV%U+N&Fa3)W
zu6c2<nU%Fh=mzvjo@^Rr4kWEAzLB!ogI=J?5!~JZoTMYj{_{gSt_tNZD63gjmf$qc
zQf?otZlQQI>$0ADQUnxt*k~?sa-`hH2DC%}3m?%40*Xs&9PJ-PXVHvp!?o`K*l$I#
z<l5hd0%1jf{~mn9e*GtIH&LTCJn3_UOSA$>utQP~Ymp`COu|Q?Hn<h`2#$+g9n#@z
zlX6ZWJejZ1S&qvdCj0pg7*?+mV3MO8v+!rJMz+9xfnh!HFXp9P#!WW$M4{iVDS<m`
znzvC}R^S=yg5ojN%k>hTk`lp1isXp@^A1P|q;q7MA0;50K&j4lJU$DJ{Xz+orU576
zwi(tOHInqHQI|(%FY$onB~QBSIh3{1m;Qy`hk18+4EM6$c&<a{i?M`*z%!wq$aFaF
zmGXR5-V)*1tAp<X0Yigq{Y<-Li5*r75AOhF(2l+|eB?!p*yas^ph&npj0Mq3gD~<>
zKeaI(_Dts-5yp{dcf|)=1+|x;@-Qmxg+<<L#%sJ**=#Djm`qYh<_a@6LnG;lacaZj
zUoi)wbo!7?o%0@zeW33~GakAVq85xE`UR#8&qLHM*qkz^q~HSaKu$?8K|P^JLah@J
zZ$u}YPBCf(lU_kBGlD({^4Ku3S*cYPL<4f5PYV-|d0a?_t!e{h#SviYW&ee<>ohFr
zC)fX@-;3H6j}V1r$Ii&`4mfgfVB>N3%-(nIJUx==I?l_zrz(uUrSQ0AzUKM@G@#zj
ztKcg8_lV0UHs{%xKXD82<)eMH`M-}YV@SP^oX)L(np68ZCB)u86DitCU24k;60)XK
zXf&oOa|Jp`k|X=b2pS#H6s!|#Jmn+mqW%J=NA8*7nm(WlLvP^y0I?{BP`r?f!q6)|
zaMGr6+NjRwCyWAn$nsh^tZInMwO7}oZ@=Jo_eG@*2e@s$!bpE#$dErF7;Dk+_Izd^
z61LG;?n$BWwndRY{bFs#r~Aa*{dbK%up57#e23;Y{AbIZc!yE~0^02|`oPc1!O8Vm
z2}k$H5^p16@Y?!y8BIR#fK_YlN8~a|RwM~`*@Y(MPZUSLV4_od?>X$M*knoPe~>1m
z_=8YR)b7#l1togXz|WH9>T<?YBG7_f-^TtpCqYe>ZMUdlnLmgZ6{B6MWga98aSTd_
zR%Wh#rTQ8*Cq9QXW|i?ZhxuEofJm|0(00TfXD@G)*F6h8&lo0~BGwDusYj-yeo`@&
z%y0<frG(A04~x1)eOZ8ma}$x39_A&q$M}Meb7b_Rxum4j3zm|SF%n1tR6yfuVN@J~
zX9R^w&8@W%hiUc>m`^&jd~iI8Rg>K|wcfRo@lw(MLX*VWCJ1|KbDH&pVbUid-?%?8
zleP%7J$`0Pk4l}O@anMnNe_)V{0-U0CU4N+R%+5nxX8vka=vz2wYnwJHdT*p-btgP
zZOtMO#9o8-iXen+YtAa^RLvQxM9^Yw@r-re|9p#&jb621Mq2`%8$fAG91a5del|@K
zHeYw%mpd;gdZO(+z<ZOW6OW`Fcu~iEpgqNl$U{z#eFvBeLDXZl)Pbmm;w!a>lOXG4
z-Xbwa&T4y}aE|Yf(k<<{D5&xMA_r8d<cpF|1dmbaDn7z1N~b?dlGM7_(h)`MGE!^O
z37IjXxsp<}X!{Y9wDuN~=24z){1XZC$`<Uelw9k?CdPk5%e4~N7TSxz>3z_<Rrg9i
zAga5lvt5xP{khP=Wn(1Wn)me!C_a5JCF14WTWFKaE@Ykaq5Xp|K$|crf`@Y2!Cgj&
zROoJOjX+tW<O9E>o+oi`{YM;DQyySh3=GdII^^tDhLrvzXRCjVG!Fy3(l3nRMr*C+
zK$zR-Tg|>!AIjIFp``iLBFyM$s!+_APbfee8~Yp@8j4tbiWh{U{dC8i6!2RpI@#i9
zXl2UDmeqSGbZ3Xm4Pzx{msK;}l<)Y5TJ{bkb>F4i$4>}$Hiuc`Fn9Q*S$AzL2na|K
zVy?*yyFLQ4iSQ^7{z|WX#dhaoyfIO*(oP@Kc_ZaJ020m03?Di+O%l9F#Yzv=^~q(@
zAOJ;sX*`oo$k}QDY0!EIW(RICt&BbhQ&Q5v2uRReKN<-0hL`Y`oL0vDvh{=`Ny8t@
z=I;Iiu60s(S$+b3rN%ByMP8E1q~|Os(Z~2m;vN!CsgDM|wu)yi5k4it>dp}2eR!T&
z87pf>WK^Pfi+#D={j|j@LG3-a)QmHl2)eh1EpEuoSzzL&>*-nMiA2dsl<u~$ND0ri
z7p=Lx$-3=;%iuln+$!xL-At=8y-OG<4S{O5u<Cx_Dl>w07oc|ihR&H!5oh=pe}DB}
z-Z6G1a;rWPzl4}dfa<#dI*%R2Ra1{{)tNdeUh#pNpT#TSXNgRKRHltR9(Kf|fH^_9
z4SiyGE1wJ_h2S1{8??gs6Z6?n%sT)d@Qf^g6^V4F&o$IW(P4a<P)~x5gOk)Bci2?2
znfpKz+%v4I@giR%Boc8y<ip6lOkSRK`Bgd~>By}v;=H80P2_0wkXAzpIzlas{5ztX
zTIhV3DnbR=-&SFAa?*H|SOmkPvI5&PRw&I7HFAw-jN3xEoG#B|mJ&0()D9I8sQ;-u
z8_wz-;973Yd@{D$+#BzZ@T;=~BS{2W@5dy1jQvmA_5~gW^8v{i;nl1>OD`)7`Teiz
z8@%!qmlDMl#e|ikA9?jap@mJ};`Jv!ah=GPxJI(JRgxi?(VtXd)%wWqfIyxgg>{k^
z65s*y%~t3V<pjDKqogI<R~R2`kcs$f4Z|>qthMbJ=9-WnXT_O*;EvI_2ukU!yv`y@
zBCkLkx-P*R0T^zuU1DxYFEcvIJG6`9(wZ46;bVDS#f;@9S793;ke-U*gl4{jO_k7a
zbVfOzpbW?h<f1!I@35vlv#>8W(>@9`?Sa?TDYz7F8%uYo98{Rl%005`8lUEdc<PUg
z`4L7}mL+m7L#r>EJWKyI>IxRm^aV1l<j^0U37`ENR_V9GSq5_9^;$)lpGV;iO?J`V
z#M!MlJ+a(Un`@Q5xE3=HzhZqE`MwZok`4~QdV&eY<Q8dJR)2dw2tse{wTH$wgyFFQ
z)u^a4X%szkajg&*xZ6~Vfd0HGX(hYBaB>kyH(T;%3K^j+>|06ZtTf?zxayDSLu0t5
zWP6GsAOVjYUuN80SV1%A9Z;eHo+918-Q653Y-pR<0z(=(P+uBrh`OqIP|7u|-4gY}
zw-VTgjSs;sdnku&7mpA1H}~Mps98+gV~tLPqKg-$jYkhUXT;90ko|oJ_%P}`N#{ta
zvpykQZk@^R5*&q}4&yIk%t5wR3WwZPnkjw9%~^k&=|HM6g3gws9FRoj;^S^or5l58
z4YkFTKrxMb-Bpl(2Y}`GZuvPPOJV1ZOr-ii*!mJ!!K2hm1aH*X=gk<R7dTaOCESzG
zDt7OHWz$nZc2RchmN|z@M!B6L4u-H(jl%SEeDfo!Yvx(d8f^$|f;fL&(neaNCkU}h
z803d@nR&J*euncfNFs%+!?$U3iE*_b2xA|LT10Ky_>5rkie#ut;G;2Y*{F^+<4bPI
z^ols}{H4E}Dy)ohd=Kd?!WN-MW;i1z{=?oWEbP`2j^rZ88^@KWLYEc4!S+chnag%a
zeVe|?3U4Tl;bGJ;b<%ZIWs#vW`AB?7n{kv(W?xDB@8-exLg8Il`Jkkua|9#h%Z5n~
zg=)%daps5-0(QFeX+liX90%EC^+7!NMcs1*t|4^qqEar0H7<Me##c1b=9<-PHk~6~
zxdc%w2|r99sUIOa={)zM^AR=sn$Nga(n=Yz<~of>BA}vs3LXs`Rw9Cdf+oywbKoL7
z%p#bVcpt1g0o)MFxDfTa4AccLz1({d<k6QH#%9>;P6XOgt7{q=+gp!YJEnnfMOmq&
zTMEZG|1UIV4vJ7m`YfL)&#XXU0y~uIZ^KPvLvm-wIkdxP=?!2XJ<*Ts;Xj)%K2i+;
z09ZZh8wp*pf4lZrSfYlvlcu(?WU~Q4V3<*BVmlZo4~$_iG~6It^MmS&W*Qa^s1Mq#
z|2y8DgKLXukB@^dHg7PJ>iexjzEmIurV!ITJlA`<3xila-Zl@b`LAghnmh(^bc6ZE
zn?S+QIX1)wPzynuCvaxEp~VXrj!PUp-N?hpCJjxr64a(`8!UM6v5TV51-mR82J<tF
zk+bMaee37lo@Uyh5PDPPM9=Cbg(ZwkDy%niw2s|_xycp<uhU5<j-lFmJ^bPCS)HRb
zxRtl>0BJ3nY=PbG+iO+l*FbA2JW?{Q{`m1l##zOp9L%iYIyvfZ#GG^AsLylAh!?JJ
zkRocDQ9MSn_rCivgn4$dxYnS?BOld%=DZ%nYvGOc&vJkTtRC`)bTgUR>=6^;dZHYY
zY>@;&nXS>|METN|JIZ_++F&91(`ql`9ZJ9-^eMp<p_DpPQRN-*X38#2gM0JGp`z~V
zKqD#wV$JZzvjOjsE!}d%o1q78=OVyA^Z0lr>zj?mQ-n9;=iO`y@-l_`K|FdUirxVj
zn!^92g{0{Jm_;bS{}GH30CXTZJDIpDo2l!6vcZ4kBG3r-Z$xHth5?B(8v7qK1RRW4
zMW(jlg(jbg%x#?M;m1ojH*edcXqR|hhdz|hYuzt_XmIUGp(ZO6J^AEUqt`*H<G>T?
zv>m>%%6+C#l;G{Y392Z@60>dLLEsko=<0em1)ts(lV#tOl`FmT@iqs~n8fmP!o!}t
z6_e${P~4YH$H(8S+@x(oc+Zd?BX3korpujZhQHWPlL70p@ln5f_P=LvTo^aTAD3ne
z^0KZAE7mMC8`U<0_V<_$z7Bg7P}1~!qN&1rf9zsf7^>+s&DhJykVF$4)g8^2?omb<
z9~+Nz^t`4HKOv@VULLh~Y;wqu?n&sOMQlDH4!;I&`+529(db*f$bEHBXj}CerxCD>
zNd4Y@{(bo_3N*98gxu*pH0qugF9hw$K9YsO=@F%mKgOeSeSZHA$n?_vC>Ts+hyw$L
zV0J&3ab&AL#V4D}_pr3-4mq~8wC`m|Cg8>UEZvz6P{<nXETZ(tz0C&}3Kbk&&f;75
zCmVr<papb1p?@+#CB)tZeBmHZ`RmxWW|@%T&w;=kAQ>Rzm(l9r1Ad+rF!{D`Z2sp0
zF8r!(eRQgMhxb}kuzTW#8I9>%?OP!UDdhcH5N+|Fb<19TXRRrewz1@zfHSG9e1~;s
zN+;K5$ELqboz8@)8UNVl|FMpLh2;O?pZ{we;$&>9rr`fr$3H?9h)U>eva_@8@{Aj|
z@i?@Ct3d#t$T8fW9y&MFFSQkfcp?N^Yq|Ft^AfifR`i2A*rTAidobwGyS#qvZxeDB
z5VtHC-nR^HZ9F9@-#qVdCS(RZ%*|hR5_j@~e>56ff1R_q@8VcDcbGG+d-I)Xu!kYL
z&733DW=^me%P~HAG(TTk*Z0FMzJn#5lX6K0<5vQI)a>)kDTR?NQvHsx+>C=tXnv$I
zCiGI|OZWDw0dGTjZT=0SZoIf+L_N>f9}L2fW(-C4q&oK$h*~px2b{;Ag>Won3p8W?
z=&t-b(#wa2PkVXwn!aYcD3rKrR12!jn;pIo#+#1pmvu#m$dD5HeY9tKk8Gh6KvM22
z6nQJ&49B<mfZVf_XvN&N$uUKnkU?7tsz!<Q`ee9iQDC0%s|PF0uj|FJ?F~cG(ktZJ
zBw<}fBzd+LNq@`@=Kkn<?*25tZQb#sWqT<i5VC9+<`2>CpJz~Va_8$iw&|Lin_ClK
zwO!%^49gIX&%$~$--b*v?wZVu#QpNFD9;+!-}26Tc-Dg;+DzFNe+}^bRN7X-oWPXp
zzX<S%_p@uSM{;c28q>m63%@M7w_P$F96eHg85SPthKEbZS5_l-y*Rp@g=*b1K6?k0
zEu8ri!mT^E^;!D<`Zmy>x({I`ewTPAL^hs}#4~#qI(yc9elMn+%{CqvDBEM1M}-M<
zkRfiZBL1(w_hCr1z>h6?|CNu=$v^h#!;r|?#Z`f(uK&2F0kVb5ovZ(kd;$hnn)px~
zhbAH64p<!)KL{DPO5<|8tkBS>4wxaUMiyryM1{v8V_Z-V^_QqxsmNd`7(>8?AP?*m
z+cD|7z0~$W;8ioJ$_B#PawWMt!=&u(EbC_YOv3tPi0k!|W=k(vgQmGqvBI*faLhEw
zom`m|<y!Gd=po^O)L+>u!4YwXyCWeX+}k?fk-RaT+|Lw=7mhe)U5N<ZkS5X{OiJ+5
zgpe$3(A;iQO&;nl)iUFFfq=@&_=N(F@fi^YT^|;BaqJ;<b7bT6I2OV%jNK6&X6Zia
zE1fG#wlsE=Ys%sej+D!${X{cH%wLkZrqx5j{wUM;>kJiDC07cVJAOlFMBcU{L@mV%
zk1Wkcr}(kc?L`Ce%GK<F4jlh`U{^@74<@^qJ`mNfQi5Q!%(vC8x?jWC^%v1iF|@tX
zk~(_3vnsdqOR_^)rxZK_L)j-Gx=JMaZQxWKp8oYir`ryTLB}^F+=$5EY;i9fXsTwI
zF|&Axi%CtY@NCD#c3tc1%Vk<jM9{VRMku3tWq%x;F*jt{p%=FCd}`8FW3uQjJA_~j
zOye>SuH0>1%|_yIm~=*gmb;?B&RV=3Y_W2;(3j~*G?EM0$%S;W{DkS)+0DIr$Q#{Q
z&9Uj>oxCBy477U#u)~NFGV63GQ(ATuV|ywN2N;4kd8wGW^D$xpYuI4YSZ4HKEjji9
zUqZ|0gVL_y12S>4dLG<zdTC>z1Hp`<+$GX<2dus!{K`nz2K#iCD-<Y~wNtGbm-s=8
zBEvk~Yng4i4~lifhkCj1(iryNqA^4m-0^01DrXPc?|?}AaVV?a2+O*Tva+&%WF<;A
z&I>fwO!^i1)8ow@b^M-Kgtm@{*v+TT(-NNvr|*{0P3>Cvv$(5@^$x`8u8QbYnM&xR
zeS{2jT`UK#N8&p~@ER|5(9;aICQ$Uei2TRm!g+SNCLUvhcQ3Yu2ua?^`n6R(xmTEB
zC)|i|2QS|aO?2kRn)G0ifJX9gISoqipzc*EqqLbzi88Qv@ANSWHY#7g_l9PB$bEc6
z*TzrB5EpjH9;`$*irser$DyeKweTQ|L8sgH)IX)geO$UqX6+-_#2)hKKxXI2VU5bv
zlWC$AP3wjKs-(C1F|0-8F{FOY_^Ny&Gz*2oh&Vo=?m`ebUqR{$gyCO;u0%rH&#I38
z%==`A%c!wg5Y%Dqtg_UTlERke>=U#x7&2ksoCUcl*~Sq2_gfh?zOKe~b>NywH;Zeq
zJQmHxG<(dnofCd#StSiRRuz#VIh8>lE{k#8r9W%v!9b#kx99FEzTvDq8Wld-;?d3_
z2Fl-QtB#T{R!PpPd@x(3HBK%({v)WWFfWE*i<pSEeY=#uOSB{?%a@aII}!Zo@aNOo
z2^ge<A7AB@laFAsTDCvSxD$h#0X+nRyw9S+uM&z7xW8nTmfIJnf#d2glYp6op$9&x
zHHfGVscSgZC{oN~L75dI_>I9-&~B+Tom2zmu;>_7@<UeNRR<VASk@!uK;0Y)s35O~
zYX)KrI*_~2pPBN3*DI;bmG;~+6)oMoF_@=Z1M)fA);)SN6<=OPQxLJ17zhBxm%Idd
zl>;F$N^s3@3{c~gI5vSk=~cl~ah#dM6?FPeuF;z;Bt<0;dHG3lR=^C^PD}YchT!ok
z566Z8Mf<VJC>OqP{;<l!V)JhVCd<;$?riB65X{^D@U;Ow;a`z5|Giv)KadNZ9A88P
zg4fL*TYXHV`>lbi%ochL7V#Kv{Dw9)7khe<HDtL8W82-m5S8ANhv_@O`my6vj4og1
zK(8w3ZM;r$fP;d^OLQ81>$+eOL`5v!<guzIW&?3?J36kf_sgyvWxGB15U=isVL)RV
zol%1gb{XCNv#u}NKv|Ak(gr*7Jsve#@fIK*vRTTVy#yCof4Yw|#_iydlztok;{;z%
z9DE0{Dvd9B!D(E%5r2yD*vOmJqLw#(u$VJSW8ET%F&O*~2<v#7ccAk-L2sTi{u!Ev
zQxm4zS^5Rux)sBav0zy{=c#7;^s~Z;nIFO|*r;272~mxG{U{?Qu1cf#&zP%!f0fp>
zZP6QSThDwVN3`4LRgTGrTJdURV9Xi76tE)dhwgNT7D9?GpZwSli`bIBdoXh44lllq
zABYA$tG<C?AkG1d&!}}J3VD{X`MW}1R@(2*K)fG6N(o|;t>;*}_sOUsJQT%6o)^Rq
z4BEKJA)<As^s=qchJdN-^?Q|I1Y|L*FSkxIJewIqRLgLTTK)b>M#kY#zqyAn8?ubY
zmBSM-xCoYBIL}s2TSUg<Ln(tfns>d9V>ix9t=x$|S(8T=!eL>g?2HCodqG=1SWbnZ
zA>hK2uo!vd?!@tF*oQ3<MJ19g<L#Ph#2S7RA;V(&rr1IKIb7GMgph~L_~ybDkAL6>
z?#RR9(X0_$X9&+hKzPn?&U%BX-O0yP35xo=MsXn@e;8ad;*BM{o<t&TO;mA`B^c4Y
z%LUG7W^6_#PWfwvV3b2a^tXzZ4G5S~mO<|MMh2}4Gu~He)=OzMQUnzm`9$iy{sfsD
zHbrV9SVoQ#%#n|pJAjVOYzl`!Uat=I#%MK(VSX@AY^LA@N!ztb0gE~S5*?X45%(z#
zx5fjdFFMp9r^U_<pvk=L*ltwM=~9^Znm;8z#VJy@9aS_0<@OiK{OW?BXd6qr&!uuB
zc<3F_=ms%Lx-qJMTqjM2H|IR<E3x0vY(O-ecfkalLDO=~tb1~yEBq9bR2fTd$o>nh
zs<gzNG-@|?4=>sGx|XT|7K8c=*_E8xj+p})g+z*RajstkAdj3Sye>~M1?i$pbzMvW
zI+sWix@`}gj^-HpB|N90!+s`CQi+X;2oL_0T(xdEmhqsR-wa|~pN%N!3G#p<%W<gM
zm7N|p>@gwt7G)4ZGD@U~4dMjN%*7qkw!**v*tPU!9@_C@;VI54c)%dsBX2Zs8Zpwk
zW2eLQvT>S_a<`uCu>=8w3ZrEAC1MZ>t=2bK%7--zI+%8OF!Tqyxe7tznqp17E=qY3
z9*m|@*(jgY3dP_KNOe@<CTbl<6Ps>X;UvD#RZnfZF_4Vc8r}HHtz292iS-2G0T&sI
zpb{`ILMEue_Up6C>*(rUDfq=ohk@7D`qWj3u;-)lrK`U2%m0p`$Yk{_jn3r?Hncl5
zL+&N?+VG<qWgHtlga9p<QYJxHv~6YHPo1;YYN@DiXt2b@20iuZdrt1WfV4PV7&~Vn
za>0haWp!7!fzmogv8_eXi9qHHTuG%ddSRz-1WudJFkBhtTbW#X4vi}AcEY56y(vcG
z7p5TeWH>SQ+rP{X$VmquRAIJk5nC_n>&O>UrTzllsQtKRjbjeZh${^-PYm>-!SS5p
zFcv2RJTN7fY`;HM<pU`hyOjZAaEAo%1=8UjG=$!LF%lZ{2QkQ|k#A?%4`4PVh`g)|
zrYVt{pa<ZRspX4R{c5d@g@>sb5u=)Bx(bj~Sl53_M8+A)*0#!POiE48a?xnv1>D@Q
zXhZNDi|km!+c>tPSya*M2$DXs(ED@?2UR9cIM`^7DXlI&go$8@^XFr(;g6bS?jAh&
zLo0t2r6ORNgpep22!GQZ*4<`S!DBeRMz6+}Sq{@3t~MFKeBg7RS|ciRjG!1CK6Xf~
ze!KJXG$HpO(O^xxuykEHgIJv%f_m~UFWFz0Mo4SO18-=mjPnRWX<GO}e4}4N(!8dK
z#?eXx6_X6X3Pv<{%zhbNX}K8}oSN7Y@EuJD4;|ky`zd$7Lu>f^ke7}`^0^}kX&3f1
zsA`h`E_TII4-Ugj!F>nNY{*&}(-}USwemB^Xhsu|Smk_$aol?jEF&uc^vzul=}Z#r
zse45uYI$;EQsS~owR(3X$w^*KbavJ9k5bo&AWQ>`<}-e@rdP=!Vn?Y}_Y`#0VC`ig
zn*r7#@Pca}Lnt(Y`X|U-Stmhe3Nb9#NAZm4MCNQw1lBvu%n)KW81X9ZPU=tb0QZH9
zcfeG9#$Qc{cfi+jj>#3h&R}}_s3T#e84{>OOaItTR+e`F4A7pSQM-q<lJQ86>9`HK
z<ql|pG^{D93CQ-N_G`L6l7vMo0U$zw1#l*BIG4)?S;7&KaV}uY^al9F@v^C*%GI-@
zagew0Mf;=4Lh=1^aR*`!l^|v$zYoLK;u1?lvWml;TZ=+_xK3_BEVDiGZ_IQ`ZYJ~z
zq2q|qej@!6oK~aD!+q!dkfC=P#Y_emL2#}U1K)e~`!0E+XK`enYCyMLkV6Dav&66;
zhI#^T$xH)Hu#9Cc^jiE97x(-XvON{jEyLNdLl`R4D@1nF@XgHlu8F8OX>%T;sonuh
zYvkqyxJGbnU&-3wi7W?D2y95wDpwnO#pIgmJiOWaA>g2l&mW`BF=xFs?||?^Gg<bv
z8v^yK_$qPFadpv(Q_Q!Yj56XgH>^`=8JA6T`e6=@Y<wcz)9gl(hH)W}H)K=>kscAi
zki)JI>2=T?!o(?u^>3zHnWH+WP1)@+(V@C+ynql9cM;@doKfQj<Q`=_OMm+}jIUT1
z{=X;HJNzCK8MXz2FA9lz&abcuc0zT&B)agSG^*31yw=>{z%Yhu!|fuaU^@Awd3LL8
z8i`QgVgq*Yv<BJe^s-WrPlyaAcA~i@wWqD!hK0;bolQ3uVn;0-0eidDa4Fvd&5HGL
zb15dvZ+sG6VTnWpqgBITGJi>soLu4ISVND@9bkW^MHZfhUHyY;>Is3{5d|4^N6pE}
zv1S{@rf_||xOjbi{SMd>aUqkQ+yiIeO>WzFz^;6rs~jUU9^QsiMvs>YsTGrg$q>H-
zpy!tjbn~*3LuvcelLk!#3nd18ITBR%(M~H3OlrA~j>$F*C1t18X35wBzhuLrM^vX<
zmX8+lX~k#f{(|+4SjHK#Z?`YafL=6LeJcj4n(^YNic*DEov9V8@L(V|opJ(d^vF6H
zt4heGOgt;Qa~wKvUgzQ;lsXFVuRdB@{5KqY%ZS*7(ecVA`b-B4hU?L9dndHhWH0Ek
zgPSWQ3g2B;(}z}1;4Fj&xw`9S8^yaIZ4#gKAE#;1f5PpbMbHs*Lt7x_=a#uHvyMer
zfm~w}z*qFgM?xZHsfRp^-k4U6lorMg*mP*Kp~6V`aY398#dqkfEYVqTTJkdroWR0K
zPZFN$g-C|g{j4<AvPlZ4N}7~#h3Yw%(XfE&6bap&Kob*xcqQp@@i0tG%mXYsFtXf(
zyb%yWb)Dd*D$w9I*@}6vb6@~9&+7v4M*pOMMFDOaz+8Hj&4bx5#t1Fat9zX;?Z$S!
zrPJpuf<G|E2o7!Y+I>naG~7$Mi8fogbjzQHNM`V7?{^0}A0PlQ4MYux2WcGae`lS|
z*%fKNmJQDksFY}ht;bIp4<`1&4XA8LwNth7cc%yE88fNX>Jc}UE8JlbK~<@qG;9!8
zx68%|WxT1c?`UYK<8GKn?BLsgC}^mo*dvXQ+3%#RR3Gqdu-$5(3B-ttcJ1k!e_AU(
z47b32TZ=<lJweixh2Q%9DhbbIa-L0gMt~lb5Fr9G!YDTR>&{&aQD(YUa6-22%wgOy
z8CmF14?IgyFLXe*-4BLbvQ#^>6ULP&zV01eWCn-}(3(j#z(7t;mM8PVJWFvx$*MGA
zWQSVeX1Y@sOF74$X!^qbh0DVxy6p?JV%;6Il$KVUy8DP|!@1Lu{-}`bu#|D^(sJRH
zV5Yh#vbvnxo|s4$Q!^CThoL|cI@ClK!W!*{0-vP&kr!Z{)3_I)^lBBdW@YRvNCOn|
zYqO|m2e~BfOut~JSVbG?FClG|KrWKz*+EGVQ7HyT?|-mZ8{8{Db7{Fgv%t^|A#I51
zIT4RI$A%Vtat9{D5>4ut5cJL}<}ER#w=85Ubm5LjAw-pX|C)Y)RMB_Gq<*56O9rs~
z@EopVX9^8-9PTLQv#ZCC!X6$7scVqJ3{F7Ciguup`xZ^%Yp0=~loe-R$ylpmpvEbE
zNUn>dQn6AD!D#5E1kVbiC)qE}k`8r&x_z%Vwv3jofaHwj>pvnDRZE$-6YoU%7#Cd<
zvfdY+w|#j5pnn+#Q?8){D0A3VVjl+}kRuakJ|XIGxm{?u#kNd)dOM~$WRJ*T^hGl)
zWCDN}MPE?$vVJ1<$U4bxl52+eOwbWD{YAHP<|R#ag0|6EBY@Z*UhGutPQwfiE<)n>
zOAER}$03GY7!N_!jo-<hc8{)`?6Bz|QsS60V8M6AJIKWn`~de#wd_dvl<f<GKU-!x
z!L4Mr%bJB4d-8U64zE^m0r6M>qWGb<+ScIR7;%keQ~MJQQzjd*v{%(Gx1!`1lUf{p
zrst4PI_A!<kx>MQnfWeQLX#90bxThfuqy)80~;%cfNkCgC)m8ql)2@)kRr#Uw9lE{
zb&?8nW&6`}5IcSug4GWh!EfjRPqBp6-MgAUe{KLdv28A35Dd)_yZE`pD1yy6wrvb%
zl}3r<9%Gr2%v4;=vvrpExZy*kQDpo3RpRMC8ysJtO8mvQ+;Y*Q<D7G80Q62*muxim
z?W(R;*M~kuN7Sr3#FmE~n5^iy^eX5|a#e-{Iq>ySunaA|vweRas5SE8^-kR#P3YYr
z{~lbID#2MgeVuIeaK%f|{tFvV$jUaU2|YH!P8jG(2*A2)15_rlX-K`wgjNn3hM2J7
zL=>ZsRUE2pA+w2R5)C3+(?>6as?KurVtY}xoe`uXp~7{nh8c=$ng?Ztg{e4TPpBh{
z6H*j|d@uAR6J?_f73#!j87j4ob)d3?ds~wGt9!vCN_oTh&S4vh+Se4^e(lF$>~Su0
zovMb}#irfLYPU<4GXoRp^qBaOD6z|kiUfkxeDuOnox=7fcrD1f?5R43h9hlX50=OI
z&B7tNh8cH-nt9WJnh6Fw!Q<gzx7zR1wXk1c&8|^wsYm9&kf~(r6b^bLGSc-9r&+v`
z^Efevl<n$T+sbxY(mQ3LuIy=8@`VI9N^x(G9pZ3EnXdy3sW08JESZ~ftWF<8vzST{
zQPg?SyA*cma`)L3SE1S70qXO54Kb61Gm`3zJVTC1kbizGcQ~dtt0mcY#3gId*tlOH
zPU$6}KBSp`-3~6s)2JJrC>-M9dP)oP-N0v_eo#@`+}v*UC06{vKsflq9x(&ND91Yv
zr9D+~eN7}Ayj*L>4w_S@c7zzC_+=mJ%HYx|cg#UfF1vc9{4`@={Jp8*b-XSCJ@H2E
zCzdxWCBfW4SUrlIj!4hsjgoGIdZY4q=^#pY96vI^DVs`zQkQzI7@aq}ZJZrYn7@)H
zhv5pcLN<HgfLhf6q;3dgxj3iHzs7|UPIczb7o*zG+KI;gI&NNTbO?To(Qs)#<ir_B
Z>#f`PR8J4l%_Ta0Fcpc+TlK#7{{Rx)uuA{{

literal 0
HcmV?d00001

diff --git a/src/test/resources/log4j2.properties b/src/test/resources/log4j2.properties
new file mode 100644
index 0000000..604d9cb
--- /dev/null
+++ b/src/test/resources/log4j2.properties
@@ -0,0 +1,17 @@
+appender.out.type = Console
+appender.out.name = out
+appender.out.layout.type = PatternLayout
+appender.out.layout.pattern = %d{HH:mm:ss} %level {%thread} %-30.30c{1} : %msg%n
+appender.out.filter.threshold.type = ThresholdFilter
+appender.out.filter.threshold.level = info
+
+appender.file.type = File
+appender.file.name = file
+appender.file.append = true
+appender.file.fileName = target/test.log
+appender.file.layout.type = PatternLayout
+appender.file.layout.pattern = %d{HH:mm:ss.SSS} %-5level {%thread} %-30.30c{1} : %msg%n
+
+rootLogger.level = info
+rootLogger.appenderRef.out.ref = out
+rootLogger.appenderRef.file.ref = file
diff --git a/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt b/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
new file mode 100644
index 0000000..f9ff036
--- /dev/null
+++ b/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/assets/foo.txt
@@ -0,0 +1 @@
+asset-file-contents
\ No newline at end of file
diff --git a/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/saved_model.pb b/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/saved_model.pb
new file mode 100644
index 0000000000000000000000000000000000000000..8aca300f0f7c330aa4695a1a4beacfb3876769ff
GIT binary patch
literal 12107
zcmeGiU2oggRg^4II+j0j?KsMH(+pbEx(r~+vfHLYho;J!eE{{cCCj>xnxIJ9AuNlk
zNGEnNpu@0-71+bDtyqU)z<$8q_P)nG?9c3V!+>q)-b+&CB`HfzvA!%pB7R(+?{n@s
z=boxU{(1nvSt9?eLl;W#SQe>(a@JHHvEOt2rTF_1{x&xOA1?UkBfGl_%boi2no?Y-
zluNZLQQ-_K?|7bbG$e~q8rz5X!A}&00F<8^gHxNq!5jC~AD#Mo^uAe(U^Xc%<5I^m
zd|UUP7$ci3^j&vAi(5+b8Z4fAwmxvpV`q3YdGVQ|z#S;xbBCT!SD+%`H_H~Ivbho<
zCl%iwdhWQn1mcb00=A+04a*+-j(^Swb%sbABXltWgj`Q31RKbE_V~zt+`0F}HjhWH
zGxR-j4c5%)Rv#g6&YA5sRI&lYvOV7!8ouKWgEtKo<+x(m0~^_N2bOL*V{gLH&yk_8
zK<T~H!JKNi!u9Y={Ja7S`|h_!@E$BYFwA4J3>B^d4_VWc{UmpFQ1(d)`Pfq0N{m;O
z-GXuEc>EnLHVW|fBGexkW6yr~%s3ehY=jG*J$6uKAK4bNTZ8h@IN_W(V9^_~wfeDr
zj-t@ieQ}R60hIXUMP;Agk_Ie`=azkFoDO`iQ+dQ6&`nqlU^zp}F%f<(0!MGaQt(o+
z*r^A%O#dFG(Q9r{iBC76E-(gP2Kbt*5D_cnf}#ki3?AHq1R4SZ$Md%s%2&6Pt_tN&
zNvWyLn-IXvTW}*JK~P5)IUIh2fZ!f1J{TCL{RGX#9;eMaY0b@bC=0g{KXjp6Y$)(P
zY{Y<k&+(spRY(~17g)UTi3{Q5`Z#>}A*kQ6#S?Uxz{4;?px5!I&B`PGn>4tIJE6k|
zD}+1JV!PBu<LwknHE^MZz<qa&&W65O5pIQ6whDEDN)L@~9acFW-56TBH{d3s(kq~f
zb!bUE8hFhH1XoFYv=!!b1E@SSo?a3gnHSsFVddcyV{F;heP>`ZPDmZ9eZ%9|!kLh%
zvDGLsKO3H&u#h;myhe%Q!o%RBh6?)t5B)I)#cyIXB1=%=QH=Q<x;hF@L_~}_Nl(89
zHP1#DHBiK);&BMHNDANqJT}m)`va5(+60f~!a^{fHzgyGA}Yq|n&}RG!x`EMu+8fj
z5s(4f;%bXyMBz8f7)YMlT4iT@XK%aRQBQ}*L-*N`{OvxNuwt~Fq2ptq(YdP<5$>By
zEz5q2+V0+a@ZD}(TMT1+v8`wuu&f7r4Wh+RKo+3bEofK69iiTXszM6dLZMK28+=d<
z65~4qHU~o(jFmW#D6-iwUr7ppiTYCoszxi=HG|!gDh8DqmZ?Ed@y!tT$6S6@w!Jce
ztfw|t_SrgAbaP;M9=ceSRB(Ik-Zjv%D@JQ<8y1PSM<h$8BJ<k|31Rx|&!o>xRo$IQ
z*i4`O3-rlj0vAo5t)w-^kmuV0*JnSa&pwqIPD@fweDZYD*Dj%Zrq6y#pM9B%n5m?V
zqMxT&rcd+I`n)^4KFtgCnXi|bOoj;-K|}#6^iMOTPm{_F%aWYk*UZc6(@g2pOjD6o
zqnW2hEz~|70QiTx9prfN1Vl!F>4y+HPc-6ERy9oLr1*;)JI_ZfAn`1+#Z&j3kbWhM
z*417Gqjf6Q=3v3l>cUzA3#Os^Stke0^$^W!ZyC`nkYZKg8-o<uu|ScSszRm9m*cic
z69|LMs3jzny<=^Jc$a#M)sn#5Rg}0SOWHJ~zfxd53o8i|Gnyp@I)zeM$Dvth^(a@v
zENv=LAHvN9O^F+xv*oBEwt?pw5bm}KqfnVqz%zI`fhBPxl6+Q&=<OWLBveGLr&d)`
z=qYD$C8&8`gGKLbh<9=juZ4)ASLKL{XPwzF|GNm+6KuwBNTN`qF^8rX>4wh6P<zI8
zJ2b=FX{OQ2rq@k*+ZMdsy+RhN{|WzKNT@0qk2E3`XhkS!t5G$^YYg<W7p3xR8S28D
z#sAvUq!Ppen7KeutQH$ZaIi@d%a8JvA@dcv)GH|lufkdeH}F{W16Wgl2ALVf4vm!m
z&JXu}(e7bAA1Z*0G~xLnKg7;aOgId~^8<Y!$|wPShkWjW_z{$mLI#<MLSUg4vBy3?
z-H)Mo-ib*=;F96aY7LW}dH@(LVsJO(A)pn%>F&tb<~J4#R*lZ36TQOQ@vxR#ufQo3
z4XpPk6{t-3)C_zk99fy(?-ZyZcTy>aq|g-8GC`3fv$MXdQH8Y#HKK|Nt20O1hY6XA
zq)Fu%l9o$I4$7P|2T-x*<rO<?29Jt0zaYgTcd}wlRIJRjEYC|+S~*pAmO{)9Q!LUT
zE7nXYHv99uoMaLoejwsyN5sp}h$sdPnbrAY&j%u6BJ3z)c4)|s3_fy^S@GbGS3KX1
zg*F~CQ)JH%*9ndaf^<yV3-hnry;phu_2rg-GHSVF=Loy@gO+_Za>srP8zi>xwGLf(
z+duQ;Q6@h%wBbhBrPD<tGBbG21Pse5af(7#dP|&6HJ}CSjui<_JbE(dvlYg<jAVt&
zzbLRFVU=A>rixCNMrgHajte!TFxr3(vd+V(M8P~U^(m}NU(0?d)341!K~9K`n1e@9
z7on&X?_@r&4g3Mme542`lTQ<PBZ59pX&3S$5^}x38n}cXOVQdv*z=@t5N*2nIBcEa
zW!QDW6;bq-q6%yR9F5)6k#5-|Z11uoFRC60Il%W}&BJbfE7UUj9-i%mZl5T;S-n?2
zaq&cMCOyCq&y?bzNzVBS&7vUbkco;n$9VA25jZ~4D9J;Xr0Ei9ex^W!<C&Z$2%&kH
z7mNOd_Es2%c6&QK4Bf`ib~4i6c(Y@^x%=AA-fKI?p4r~J)3)~B*y|j=-hb0L+}nM<
zH8RFChRsu0k*FZC^9>pk1<fUwnfE@yB8|1`C#M{&eUvBUZwF{62Q4AbF&&liKF4}9
zh|jEnP$C8F;=nOb!3jIJnVj>mj?mZ&pi4F#H^6z272p@x3ChAo<YOi(i<eHJGFUtj
zicD{wI6n?;iJz#o;s<s)oWB%GaI<$EO^Y?v%00#CVWQhp`aQ+$Vg4AGk2<9ZRfZqK
z3ql-%0KNyt&Ifadi0OJNc<n|=?Rt+y)E0WdU-(SHW-^Nr4>@`goGj8z6@_-378RY!
z13M{V6^hcQib+vAvU1j<<IWnWowCCIz#kR3#z|wxbYpkGqNhE?9+??0*s}?(AH<lF
zu?5MPs=X3z)i+?1T<2T0_%7)PdCmlXQs6fKAU$i*${4gDe99qDwu0m|voiTp+N>G`
zqY$hK6tNk^7=^i>kZ77*k})nq=ix0L?_`Rvrl&~o*a$vRg8eC)Y?&P5C8DR0Oqyug
zm{n$xEb_v@PZikYnv9O}zkpEiG2E1B_#a1uKPx!F!lFSW1u~g!h-;Wm6BfOQ3)w!h
z++W*D^&B@~ot?|b|KPV_43jcTj5-oGGUDt;NcM5@C5mjobXR0Mf-+1`!n1SUut|z~
zO6x0|&a7(Nn5$v)dx^eIL?4FgRf)V*)DE0#o)i0dPK80HZg)bRnWAp@^18j;(Wb21
zy@)#8;8Nu6$>d!iye!>fRx!H~;z<+X3LJ~sBr%i|6d%F%D6En$izph}sC3_v?suho
znKO6`hn!dp2@xBEL4bBmAPQgb^iq;bvsC?5p>N@#2m`?jiEf*SOfcLgy6r%`At1!?
zrSg{YTH)>s_wN*&FX9{yYlG;lq#7Ci6AuSHF|i1sHGf@%HGhnQKWuUc2X({<kw^Ze
zkbfvRX<0H5Wi;+W@rrf6y6w&tX}qV~zQfn|+4`s^#H(<<G(HlpH`)5AaKx){y)>S{
z*LQGB@8AcMt}kVm!6yVS6C+3AHzxlkDoJTAXy73m1TRzHG(Raq?f<f{nf#i!L``n#
zyx7)H&{lMJgyU`XcqQNbK)6`Aj2oRtzNWp4^N_dMjOF%`>mChkW8`?-rh5{k6o&)%
z**1Ec?cYZO#TiGk9y3o8Hq5jSvC_j~GaQ)1)R&K%k^{KO4)Z9K&bsYYg{IFK2`)Hi
z3(j*EP%3I1Mnx-t-@=(i99Z?Wr#K?RhOC@J97522S3f5vyckaEA)WQf#0A1vDelMQ
zC8&6&i_SO|?Pdnr5SR80R>t-bg75;g9b#RG_hnGU;G9P`Vc{GNK2su@3<ub>Q&>(S
oo`K(66TI`vIYj$0tVU=%T+X}X7;Z&^Mt;un_#qPCyO-emzqLMxr2qf`

literal 0
HcmV?d00001

diff --git a/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.data-00000-of-00001 b/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.data-00000-of-00001
new file mode 100644
index 0000000000000000000000000000000000000000..a378d172b66af3595a20b49a506ef16b03d8d017
GIT binary patch
literal 20
TcmZQzV6bO^U<U>U2QUTz4K@M4

literal 0
HcmV?d00001

diff --git a/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.index b/src/test/resources/testdata/saved_model_half_plus_two_cpu/00000123/variables/variables.index
new file mode 100644
index 0000000000000000000000000000000000000000..5ae2cc0f2d45a17b3b86dbac99d1b2a64e2be29a
GIT binary patch
literal 172
zcmYevVPIh6V3cCwU}RwAPUK(|V$fhQWwTJ1XJq6xV&rF0VBr<yVPND<VinZk07?n`
zt?&nOlUW62c!1pZ*P0HC8ruj!nG8U{2*S*_>||jah6A4#ZeU<!N?{0x3zdGK(;mZ-
S$p;c31j2tebgPuQ-v$7#(;A!r

literal 0
HcmV?d00001

diff --git a/src/test/resources/tfsc4j.properties b/src/test/resources/tfsc4j.properties
new file mode 100644
index 0000000..5523cc0
--- /dev/null
+++ b/src/test/resources/tfsc4j.properties
@@ -0,0 +1,2 @@
+#target = <target>
+#credentials = <credentials>