Skip to content

Commit

Permalink
Fix upstream TF sync (#2625)
Browse files Browse the repository at this point in the history
The CI sync from the upstream TF has been failing for the last month due
to the relocation of certain TFLite files. In particular, some schema
files were relocated to tensorflow/compiler/mlir/lite subfolder. This PR
mirrors the migration and adds a few redirection headers to ensure
source compatibility for now.

This PR also includes a TF sync to get us caught up again. While
that could have been done separately, this is done together to ensure
everything continues to build with the relocations.

BUG=b/351824449
  • Loading branch information
rascani authored Jul 15, 2024
1 parent 0cc2e49 commit ff5c090
Show file tree
Hide file tree
Showing 17 changed files with 187 additions and 58 deletions.
4 changes: 2 additions & 2 deletions ci/sync_from_upstream_tf.sh
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,8 @@ git checkout tensorflow/lite/kernels/internal/tensor_utils.cc
bazel build tensorflow/lite/python:schema_py
/bin/cp bazel-bin/tensorflow/lite/python/schema_py_generated.py tensorflow/lite/python/schema_py_generated.py

bazel build tensorflow/lite/schema:schema_fbs_srcs
/bin/cp ./bazel-bin/tensorflow/lite/schema/schema_generated.h tensorflow/lite/schema/schema_generated.h
bazel build tensorflow/compiler/mlir/lite/schema:schema_fbs_srcs
/bin/cp ./bazel-bin/tensorflow/compiler/mlir/lite/schema/schema_generated.h tensorflow/lite/schema/schema_generated.h

# Must clean the bazel directories out after building as we don't check these in.
bazel clean
Expand Down
5 changes: 3 additions & 2 deletions ci/tflite_files.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
tensorflow/compiler/mlir/lite/schema/schema.fbs
tensorflow/compiler/mlir/lite/schema/schema_utils.h
tensorflow/compiler/mlir/lite/schema/schema_utils.cc
tensorflow/lite/array.h
tensorflow/lite/array.cc
tensorflow/lite/builtin_op_data.h
Expand All @@ -12,7 +15,6 @@ tensorflow/lite/kernels/internal/quantization_util.cc
tensorflow/lite/kernels/internal/tensor_ctypes.cc
tensorflow/lite/kernels/internal/reference/comparisons.cc
tensorflow/lite/kernels/kernel_util.cc
tensorflow/lite/schema/schema_utils.cc
tensorflow/lite/c/builtin_op_data.h
tensorflow/lite/c/c_api_types.h
tensorflow/lite/c/common.h
Expand Down Expand Up @@ -108,7 +110,6 @@ tensorflow/lite/kernels/padding.h
tensorflow/lite/portable_type_to_tflitetype.h
tensorflow/lite/python/schema_util.py
tensorflow/lite/schema/schema_utils.h
tensorflow/lite/schema/schema.fbs
tensorflow/lite/tools/flatbuffer_utils.py
tensorflow/lite/tools/flatbuffer_utils_test.py
tensorflow/lite/tools/randomize_weights.py
Expand Down
10 changes: 10 additions & 0 deletions tensorflow/compiler/mlir/lite/kernels/internal/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# BUILD file for the relocated kernels/internal compatibility redirection
# header (see compatibility_macros.h): it forwards to the original
# //tensorflow/lite/kernels/internal:compatibility target so existing
# includes keep working after the schema migration.
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"],
)

# Header-only redirection target; the real macros live in the dep below.
cc_library(
name = "compatibility_macros",
hdrs = ["compatibility_macros.h"],
deps = ["//tensorflow/lite/kernels/internal:compatibility"],
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Redirection header: forwards to the original compatibility macros so that
// sources relocated under tensorflow/compiler/mlir/lite can keep building.
// NOTE: fixed the guard spelling (COMPATABILITY -> COMPATIBILITY); the guard
// is self-consistent, so this is purely a naming correction.
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATIBILITY_MACROS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATIBILITY_MACROS_H_

#include "tensorflow/lite/kernels/internal/compatibility.h"  // IWYU pragma: keep

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATIBILITY_MACROS_H_
41 changes: 41 additions & 0 deletions tensorflow/compiler/mlir/lite/schema/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# BUILD file for the relocated TFLite schema: the flatbuffer schema and the
# schema_utils library now live under tensorflow/compiler/mlir/lite/schema,
# mirroring the upstream TF relocation (see commit message / BUG=b/351824449).
load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")

package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"],
)

# schema.fbs is exported so downstream packages (e.g. tensorflow/lite/schema
# and tensorflow/lite/python) can reference it as a source label.
exports_files(
srcs = ["schema.fbs"],
)

# Generated C++ bindings for the schema, without reflection.
flatbuffer_cc_library(
name = "schema_fbs",
srcs = ["schema.fbs"],
)

# Generic schema for inference on device (but with reflections makes bigger).
flatbuffer_cc_library(
name = "schema_fbs_with_reflection",
srcs = ["schema.fbs"],
flatc_args = [
"--reflect-types",
"--reflect-names",
"--no-union-value-namespacing",
"--gen-object-api",
],
out_prefix = "reflection/",
)

# Helpers for reading builtin operator codes (handles the extended builtin
# code field introduced to work around builtin-code exhaustion).
cc_library(
name = "schema_utils",
srcs = ["schema_utils.cc"],
hdrs = ["schema_utils.h"],
deps = [
":schema_fbs",
"//tensorflow/compiler/mlir/lite/kernels/internal:compatibility_macros",
"@flatbuffers//:runtime_cc",
],
)
File renamed without changes.
22 changes: 22 additions & 0 deletions tensorflow/compiler/mlir/lite/schema/schema_generated.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_

// This file should only be used by the make build to redirect schema_utils.cc
// usage of the generated schema to the proper location.
#include "tensorflow/lite/schema/schema_generated.h"  // IWYU pragma: keep

// Fixed: the closing-guard comment previously named the wrong macro
// (TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_); it must match the guard above.
#endif  // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_
Original file line number Diff line number Diff line change
Expand Up @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"

#include <algorithm>

#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"

namespace tflite {

Expand Down
33 changes: 33 additions & 0 deletions tensorflow/compiler/mlir/lite/schema/schema_utils.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"

namespace tflite {

// The following methods are introduced to resolve the op builtin code shortage
// problem. The new builtin operator will be assigned to the extended builtin
// code field in the flatbuffer schema. These methods help to hide the builtin
// code details from callers.

// Returns the builtin operator for a flatbuffer-table operator code.
BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);

// Returns the builtin operator for an object-API (T-suffixed) operator code.
BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);

}  // namespace tflite

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_
12 changes: 0 additions & 12 deletions tensorflow/lite/core/macros.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,16 +65,4 @@ limitations under the License.
#define TFLITE_HAS_ATTRIBUTE_WEAK 0
#endif

#ifndef TF_LITE_STATIC_MEMORY
// maximum size of a valid flatbuffer
inline constexpr unsigned int flatbuffer_size_max = 2147483648;
// If none zero then the buffer is stored outside of the flatbuffers, string
inline constexpr char tflite_metadata_buffer_location[] = "buffer_location";
// field for minimum runtime version, string
inline constexpr char tflite_metadata_min_runtime_version[] =
"min_runtime_version";
// the stablehlo op version is supported by the tflite runtime
inline constexpr char tflite_supported_stablehlo_version[] = "1.0.0";
#endif

#endif // TENSORFLOW_LITE_CORE_MACROS_H_
6 changes: 6 additions & 0 deletions tensorflow/lite/kernels/internal/portable_tensor_utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,12 @@ void ApplySignbitToVector(const float* __restrict__ vector, int v_size,

void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements,
int8_t* dst_buffer) {
// num_elements means the number of elements regardless of packed or unpacked.
// For example, 3 elements means both
// 1) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes.
// stored in src_buffer[0] and src_buffer[1] (i = 0..1)
// 2) Unpacked: 3 int8's = 3 bytes.
//    stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2)
for (int i = 0; i < num_elements / 2; i++) {
int8_t byte = src_buffer[i];
// Shift left first so that sign is properly extended when shifted right
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/lite/micro/tools/make/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -499,10 +499,10 @@ $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/*.h)
# The explicitly specified list of sources and headers that are shared between
# TfLite and TFLM are in the ci/sync_from_upstream_tf.sh script.
TFL_CC_SRCS := \
$(shell find $(TENSORFLOW_ROOT)tensorflow/lite -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.cc" -o -name "*.c")
$(shell find $(TENSORFLOW_ROOT)tensorflow -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.cc" -o -name "*.c")

TFL_CC_HDRS := \
$(shell find $(TENSORFLOW_ROOT)tensorflow/lite -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.h")
$(shell find $(TENSORFLOW_ROOT)tensorflow -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.h")

ifneq ($(BUILD_TYPE), no_tf_lite_static_memory)
EXCLUDED_TFL_CC_SRCS := \
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/lite/python/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ package(

flatbuffer_py_library(
name = "schema_py",
srcs = ["//tensorflow/lite/schema:schema.fbs"],
srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"],
)

py_library(
Expand Down
9 changes: 3 additions & 6 deletions tensorflow/lite/schema/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,13 @@ package(
# bazel build schema_fbs_srcs.
flatbuffer_cc_library(
name = "schema_fbs",
srcs = ["schema.fbs"],
srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"],
)

# Generic schema for inference on device (but with reflections makes bigger).
flatbuffer_cc_library(
name = "schema_fbs_with_reflection",
srcs = ["schema.fbs"],
srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"],
flatc_args = [
"--reflect-types",
"--reflect-names",
Expand All @@ -29,11 +29,8 @@ flatbuffer_cc_library(

cc_library(
name = "schema_utils",
srcs = ["schema_utils.cc"],
hdrs = ["schema_utils.h"],
deps = [
":schema_fbs",
"//tensorflow/lite/kernels/internal:compatibility",
"@flatbuffers//:runtime_cc",
"//tensorflow/compiler/mlir/lite/schema:schema_utils",
],
)
15 changes: 1 addition & 14 deletions tensorflow/lite/schema/schema_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,19 +15,6 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
#define TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

// The following methods are introduced to resolve op builtin code shortage
// problem. The new builtin operator will be assigned to the extended builtin
// code field in the flatbuffer schema. Those methods helps to hide builtin code
// details.
BuiltinOperator GetBuiltinCode(const OperatorCode *op_code);

BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code);

} // namespace tflite
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" // IWYU pragma: keep

#endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_
55 changes: 39 additions & 16 deletions tensorflow/lite/tools/flatbuffer_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,9 +58,38 @@ def read_model(input_tflite_file):
raise RuntimeError('Input file not found at %r\n' % input_tflite_file)
with gfile.GFile(input_tflite_file, 'rb') as input_file_handle:
model_bytearray = bytearray(input_file_handle.read())
return read_model_from_bytearray(model_bytearray)


def read_model_from_bytearray(model_bytearray):
  """Reads a tflite model as a python object.

  Args:
    model_bytearray: TFLite model in bytearray format.

  Returns:
    A python object corresponding to the input tflite file.
  """
  model = convert_bytearray_to_object(model_bytearray)
  # On big-endian hosts the serialized (little-endian) buffers must be
  # byte-swapped before use.
  if sys.byteorder == 'big':
    byte_swap_tflite_model_obj(model, 'little', 'big')

  # Offset handling for models > 2GB: buffers and large custom options can be
  # stored outside the flatbuffer and referenced via (offset, size). Inline
  # that data into the parsed object and zero the offsets so downstream code
  # sees a uniform in-memory representation.
  for buffer in model.buffers:
    if buffer.offset:
      buffer.data = model_bytearray[buffer.offset : buffer.offset + buffer.size]
      buffer.offset = 0
      buffer.size = 0
  for subgraph in model.subgraphs:
    for op in subgraph.operators:
      if op.largeCustomOptionsOffset:
        op.customOptions = model_bytearray[
            op.largeCustomOptionsOffset : op.largeCustomOptionsOffset
            + op.largeCustomOptionsSize
        ]
        op.largeCustomOptionsOffset = 0
        op.largeCustomOptionsSize = 0

  return model


Expand Down Expand Up @@ -294,14 +323,10 @@ def byte_swap_buffer_content(buffer, chunksize, from_endiness, to_endiness):
buffer.data[i : i + chunksize]
for i in range(0, len(buffer.data), chunksize)
]
buffer.data = b''.join(
[
int.from_bytes(byteswap, from_endiness).to_bytes(
chunksize, to_endiness
)
for byteswap in to_swap
]
)
buffer.data = b''.join([
int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness)
for byteswap in to_swap
])


def byte_swap_string_content(buffer, from_endiness, to_endiness):
Expand All @@ -314,14 +339,12 @@ def byte_swap_string_content(buffer, from_endiness, to_endiness):
"""
num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness)
string_content = bytearray(buffer.data[4 * (num_of_strings + 2) :])
prefix_data = b''.join(
[
int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes(
4, to_endiness
)
for i in range(0, (num_of_strings + 1) * 4 + 1, 4)
]
)
prefix_data = b''.join([
int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes(
4, to_endiness
)
for i in range(0, (num_of_strings + 1) * 4 + 1, 4)
])
buffer.data = prefix_data + string_content


Expand Down
2 changes: 1 addition & 1 deletion third_party/flatbuffers/build_defs.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def flatbuffer_cc_library(
reflection binaries for the schemas.
'''
output_headers = [
(out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1])
(out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1].split(":")[-1])
for s in srcs
]
reflection_name = "%s_reflection" % name if gen_reflections else ""
Expand Down

0 comments on commit ff5c090

Please sign in to comment.