diff --git a/ci/sync_from_upstream_tf.sh b/ci/sync_from_upstream_tf.sh index 46eee4e21f8..bb5d098a75c 100755 --- a/ci/sync_from_upstream_tf.sh +++ b/ci/sync_from_upstream_tf.sh @@ -58,8 +58,8 @@ git checkout tensorflow/lite/kernels/internal/tensor_utils.cc bazel build tensorflow/lite/python:schema_py /bin/cp bazel-bin/tensorflow/lite/python/schema_py_generated.py tensorflow/lite/python/schema_py_generated.py -bazel build tensorflow/lite/schema:schema_fbs_srcs -/bin/cp ./bazel-bin/tensorflow/lite/schema/schema_generated.h tensorflow/lite/schema/schema_generated.h +bazel build tensorflow/compiler/mlir/lite/schema:schema_fbs_srcs +/bin/cp ./bazel-bin/tensorflow/compiler/mlir/lite/schema/schema_generated.h tensorflow/lite/schema/schema_generated.h # Must clean the bazel directories out after building as we don't check these in. bazel clean diff --git a/ci/tflite_files.txt b/ci/tflite_files.txt index 51381ce7447..65141375150 100644 --- a/ci/tflite_files.txt +++ b/ci/tflite_files.txt @@ -1,3 +1,6 @@ +tensorflow/compiler/mlir/lite/schema/schema.fbs +tensorflow/compiler/mlir/lite/schema/schema_utils.h +tensorflow/compiler/mlir/lite/schema/schema_utils.cc tensorflow/lite/array.h tensorflow/lite/array.cc tensorflow/lite/builtin_op_data.h @@ -12,7 +15,6 @@ tensorflow/lite/kernels/internal/quantization_util.cc tensorflow/lite/kernels/internal/tensor_ctypes.cc tensorflow/lite/kernels/internal/reference/comparisons.cc tensorflow/lite/kernels/kernel_util.cc -tensorflow/lite/schema/schema_utils.cc tensorflow/lite/c/builtin_op_data.h tensorflow/lite/c/c_api_types.h tensorflow/lite/c/common.h @@ -108,7 +110,6 @@ tensorflow/lite/kernels/padding.h tensorflow/lite/portable_type_to_tflitetype.h tensorflow/lite/python/schema_util.py tensorflow/lite/schema/schema_utils.h -tensorflow/lite/schema/schema.fbs tensorflow/lite/tools/flatbuffer_utils.py tensorflow/lite/tools/flatbuffer_utils_test.py tensorflow/lite/tools/randomize_weights.py diff --git 
a/tensorflow/compiler/mlir/lite/kernels/internal/BUILD b/tensorflow/compiler/mlir/lite/kernels/internal/BUILD new file mode 100644 index 00000000000..4ebbc21d4df --- /dev/null +++ b/tensorflow/compiler/mlir/lite/kernels/internal/BUILD @@ -0,0 +1,10 @@ +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], +) + +cc_library( + name = "compatibility_macros", + hdrs = ["compatibility_macros.h"], + deps = ["//tensorflow/lite/kernels/internal:compatibility"], +) diff --git a/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h b/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h new file mode 100644 index 00000000000..5d073805128 --- /dev/null +++ b/tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h @@ -0,0 +1,21 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATIBILITY_MACROS_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATIBILITY_MACROS_H_ + +#include "tensorflow/lite/kernels/internal/compatibility.h" // IWYU pragma: keep + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_KERNELS_INTERNAL_COMPATIBILITY_MACROS_H_ diff --git a/tensorflow/compiler/mlir/lite/schema/BUILD b/tensorflow/compiler/mlir/lite/schema/BUILD new file mode 100644 index 00000000000..f54726bd499 --- /dev/null +++ b/tensorflow/compiler/mlir/lite/schema/BUILD @@ -0,0 +1,41 @@ +load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library") + +package( + default_visibility = [ + "//visibility:public", + ], + licenses = ["notice"], +) + +exports_files( + srcs = ["schema.fbs"], +) + +flatbuffer_cc_library( + name = "schema_fbs", + srcs = ["schema.fbs"], +) + +# Generic schema for inference on device (but with reflections makes bigger). 
+flatbuffer_cc_library( + name = "schema_fbs_with_reflection", + srcs = ["schema.fbs"], + flatc_args = [ + "--reflect-types", + "--reflect-names", + "--no-union-value-namespacing", + "--gen-object-api", + ], + out_prefix = "reflection/", +) + +cc_library( + name = "schema_utils", + srcs = ["schema_utils.cc"], + hdrs = ["schema_utils.h"], + deps = [ + ":schema_fbs", + "//tensorflow/compiler/mlir/lite/kernels/internal:compatibility_macros", + "@flatbuffers//:runtime_cc", + ], +) diff --git a/tensorflow/lite/schema/schema.fbs b/tensorflow/compiler/mlir/lite/schema/schema.fbs similarity index 100% rename from tensorflow/lite/schema/schema.fbs rename to tensorflow/compiler/mlir/lite/schema/schema.fbs diff --git a/tensorflow/compiler/mlir/lite/schema/schema_generated.h b/tensorflow/compiler/mlir/lite/schema/schema_generated.h new file mode 100644 index 00000000000..7eeedf868d2 --- /dev/null +++ b/tensorflow/compiler/mlir/lite/schema/schema_generated.h @@ -0,0 +1,22 @@ +/* Copyright 2024 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_ + +// This file should only be used by the make build to redirect schema_utils.cc +// usage of the generated schema to the proper location. 
+#include "tensorflow/lite/schema/schema_generated.h" // IWYU pragma: keep + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_GENERATED_H_ diff --git a/tensorflow/lite/schema/schema_utils.cc b/tensorflow/compiler/mlir/lite/schema/schema_utils.cc similarity index 94% rename from tensorflow/lite/schema/schema_utils.cc rename to tensorflow/compiler/mlir/lite/schema/schema_utils.cc index 285873de24d..a173380940d 100644 --- a/tensorflow/lite/schema/schema_utils.cc +++ b/tensorflow/compiler/mlir/lite/schema/schema_utils.cc @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "tensorflow/lite/schema/schema_utils.h" +#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" #include <algorithm> -#include "tensorflow/lite/kernels/internal/compatibility.h" +#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h" namespace tflite { diff --git a/tensorflow/compiler/mlir/lite/schema/schema_utils.h b/tensorflow/compiler/mlir/lite/schema/schema_utils.h new file mode 100644 index 00000000000..7498aa02ebe --- /dev/null +++ b/tensorflow/compiler/mlir/lite/schema/schema_utils.h @@ -0,0 +1,33 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_ +#define TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_ + +#include "flatbuffers/flatbuffers.h" +#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h" + +namespace tflite { + +// The following methods are introduced to resolve op builtin code shortage +// problem. The new builtin operator will be assigned to the extended builtin +// code field in the flatbuffer schema. Those methods helps to hide builtin code +// details. +BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); + +BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); + +} // namespace tflite + +#endif // TENSORFLOW_COMPILER_MLIR_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/tensorflow/lite/core/macros.h b/tensorflow/lite/core/macros.h index 9eab6be877d..86de4daefe7 100644 --- a/tensorflow/lite/core/macros.h +++ b/tensorflow/lite/core/macros.h @@ -65,16 +65,4 @@ limitations under the License. 
#define TFLITE_HAS_ATTRIBUTE_WEAK 0 #endif -#ifndef TF_LITE_STATIC_MEMORY -// maximum size of a valid flatbuffer -inline constexpr unsigned int flatbuffer_size_max = 2147483648; -// If none zero then the buffer is stored outside of the flatbuffers, string -inline constexpr char tflite_metadata_buffer_location[] = "buffer_location"; -// field for minimum runtime version, string -inline constexpr char tflite_metadata_min_runtime_version[] = - "min_runtime_version"; -// the stablehlo op version is supported by the tflite runtime -inline constexpr char tflite_supported_stablehlo_version[] = "1.0.0"; -#endif - #endif // TENSORFLOW_LITE_CORE_MACROS_H_ diff --git a/tensorflow/lite/kernels/internal/portable_tensor_utils.cc b/tensorflow/lite/kernels/internal/portable_tensor_utils.cc index 024043d75d3..577fc6b235b 100644 --- a/tensorflow/lite/kernels/internal/portable_tensor_utils.cc +++ b/tensorflow/lite/kernels/internal/portable_tensor_utils.cc @@ -70,6 +70,12 @@ void ApplySignbitToVector(const float* __restrict__ vector, int v_size, void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, int8_t* dst_buffer) { + // num_elements means the number of elements regardless of packed or unpacked. + // For example, 3 elements means both + // 1) Packed: 3 int4's = 12 bit -> 16 bits (padded) = 2 bytes. + // stored in src_buffer[0] and src_buffer[1] (i = 0..1) + // 2) Unpacked: 3 int8's = 3 bytes. + // 
stored in dst_buffer[0], dst_buffer[1] and dst_buffer[2] (j = 0..2) for (int i = 0; i < num_elements / 2; i++) { int8_t byte = src_buffer[i]; // Shift left first so that sign is properly extended when shifted right diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile index 90b0c2945ff..8217501f904 100644 --- a/tensorflow/lite/micro/tools/make/Makefile +++ b/tensorflow/lite/micro/tools/make/Makefile @@ -499,10 +499,10 @@ $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/*.h) # The explicitly specified list of sources and headers that are shared between # TfLite and TFLM are in the ci/sync_from_upstream_tf.sh script. TFL_CC_SRCS := \ -$(shell find $(TENSORFLOW_ROOT)tensorflow/lite -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.cc" -o -name "*.c") +$(shell find $(TENSORFLOW_ROOT)tensorflow -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.cc" -o -name "*.c") TFL_CC_HDRS := \ -$(shell find $(TENSORFLOW_ROOT)tensorflow/lite -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.h") +$(shell find $(TENSORFLOW_ROOT)tensorflow -type d \( -path $(TENSORFLOW_ROOT)tensorflow/lite/experimental -o -path $(TENSORFLOW_ROOT)tensorflow/lite/micro \) -prune -false -o -name "*.h") ifneq ($(BUILD_TYPE), no_tf_lite_static_memory) EXCLUDED_TFL_CC_SRCS := \ diff --git a/tensorflow/lite/python/BUILD b/tensorflow/lite/python/BUILD index 6ca2c578480..7a7ce3c2738 100644 --- a/tensorflow/lite/python/BUILD +++ b/tensorflow/lite/python/BUILD @@ -10,7 +10,7 @@ package( flatbuffer_py_library( name = "schema_py", - srcs = ["//tensorflow/lite/schema:schema.fbs"], + srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"], ) py_library( diff --git 
a/tensorflow/lite/schema/BUILD b/tensorflow/lite/schema/BUILD index e87375a2358..199c105e6ad 100644 --- a/tensorflow/lite/schema/BUILD +++ b/tensorflow/lite/schema/BUILD @@ -11,13 +11,13 @@ package( # bazel build schema_fbs_srcs. flatbuffer_cc_library( name = "schema_fbs", - srcs = ["schema.fbs"], + srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"], ) # Generic schema for inference on device (but with reflections makes bigger). flatbuffer_cc_library( name = "schema_fbs_with_reflection", - srcs = ["schema.fbs"], + srcs = ["//tensorflow/compiler/mlir/lite/schema:schema.fbs"], flatc_args = [ "--reflect-types", "--reflect-names", @@ -29,11 +29,8 @@ flatbuffer_cc_library( cc_library( name = "schema_utils", - srcs = ["schema_utils.cc"], hdrs = ["schema_utils.h"], deps = [ - ":schema_fbs", - "//tensorflow/lite/kernels/internal:compatibility", - "@flatbuffers//:runtime_cc", + "//tensorflow/compiler/mlir/lite/schema:schema_utils", ], ) diff --git a/tensorflow/lite/schema/schema_utils.h b/tensorflow/lite/schema/schema_utils.h index 9cca36c7744..ff04cf14b72 100644 --- a/tensorflow/lite/schema/schema_utils.h +++ b/tensorflow/lite/schema/schema_utils.h @@ -15,19 +15,6 @@ limitations under the License. #ifndef TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ #define TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ -#include "flatbuffers/flatbuffers.h" -#include "tensorflow/lite/schema/schema_generated.h" - -namespace tflite { - -// The following methods are introduced to resolve op builtin code shortage -// problem. The new builtin operator will be assigned to the extended builtin -// code field in the flatbuffer schema. Those methods helps to hide builtin code -// details. 
-BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); - -BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); - -} // namespace tflite +#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h" // IWYU pragma: keep #endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/tensorflow/lite/tools/flatbuffer_utils.py b/tensorflow/lite/tools/flatbuffer_utils.py index 1a6c61e4b9e..a7d1dd1a117 100644 --- a/tensorflow/lite/tools/flatbuffer_utils.py +++ b/tensorflow/lite/tools/flatbuffer_utils.py @@ -58,9 +58,38 @@ def read_model(input_tflite_file): raise RuntimeError('Input file not found at %r\n' % input_tflite_file) with gfile.GFile(input_tflite_file, 'rb') as input_file_handle: model_bytearray = bytearray(input_file_handle.read()) + return read_model_from_bytearray(model_bytearray) + + +def read_model_from_bytearray(model_bytearray): + """Reads a tflite model as a python object. + + Args: + model_bytearray: TFLite model in bytearray format. + + Returns: + A python object corresponding to the input tflite file. 
+ """ model = convert_bytearray_to_object(model_bytearray) if sys.byteorder == 'big': byte_swap_tflite_model_obj(model, 'little', 'big') + + # Offset handling for models > 2GB + for buffer in model.buffers: + if buffer.offset: + buffer.data = model_bytearray[buffer.offset : buffer.offset + buffer.size] + buffer.offset = 0 + buffer.size = 0 + for subgraph in model.subgraphs: + for op in subgraph.operators: + if op.largeCustomOptionsOffset: + op.customOptions = model_bytearray[ + op.largeCustomOptionsOffset : op.largeCustomOptionsOffset + + op.largeCustomOptionsSize + ] + op.largeCustomOptionsOffset = 0 + op.largeCustomOptionsSize = 0 + return model @@ -294,14 +323,10 @@ def byte_swap_buffer_content(buffer, chunksize, from_endiness, to_endiness): buffer.data[i : i + chunksize] for i in range(0, len(buffer.data), chunksize) ] - buffer.data = b''.join( - [ - int.from_bytes(byteswap, from_endiness).to_bytes( - chunksize, to_endiness - ) - for byteswap in to_swap - ] - ) + buffer.data = b''.join([ + int.from_bytes(byteswap, from_endiness).to_bytes(chunksize, to_endiness) + for byteswap in to_swap + ]) def byte_swap_string_content(buffer, from_endiness, to_endiness): @@ -314,14 +339,12 @@ def byte_swap_string_content(buffer, from_endiness, to_endiness): """ num_of_strings = int.from_bytes(buffer.data[0:4], from_endiness) string_content = bytearray(buffer.data[4 * (num_of_strings + 2) :]) - prefix_data = b''.join( - [ - int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes( - 4, to_endiness - ) - for i in range(0, (num_of_strings + 1) * 4 + 1, 4) - ] - ) + prefix_data = b''.join([ + int.from_bytes(buffer.data[i : i + 4], from_endiness).to_bytes( + 4, to_endiness + ) + for i in range(0, (num_of_strings + 1) * 4 + 1, 4) + ]) buffer.data = prefix_data + string_content diff --git a/third_party/flatbuffers/build_defs.bzl b/third_party/flatbuffers/build_defs.bzl index b26381e941b..92c9e2ec67d 100644 --- a/third_party/flatbuffers/build_defs.bzl +++ 
b/third_party/flatbuffers/build_defs.bzl @@ -194,7 +194,7 @@ def flatbuffer_cc_library( reflection binaries for the schemas. ''' output_headers = [ - (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1]) + (out_prefix + "%s_generated.h") % (s.replace(".fbs", "").split("/")[-1].split(":")[-1]) for s in srcs ] reflection_name = "%s_reflection" % name if gen_reflections else ""