From 21cc930a07c69698e640c3b89c1b1ad8122854db Mon Sep 17 00:00:00 2001
From: bokyeong lee
Date: Wed, 11 Sep 2024 15:39:55 +0900
Subject: [PATCH] [luci/service] add include

This commit adds missing includes to luci/service CircleRange.cpp.

ONE-DCO-1.0-Signed-off-by: bokyeong lee
---
 compiler/bcq-tools/generate_bcq_metadata.py | 33 +-
 .../circle-part-value-py-test/conftest.py | 5 +-
 .../test_circle_part_value.py | 24 +-
 .../circle-part-value-test/part_eval_one.py | 21 +-
 compiler/fm-equalize/fm-equalize | 71 +-
 compiler/luci-pass-value-py-test/conftest.py | 15 +-
 .../luci-pass-value-py-test/test_luci_eval.py | 69 +-
 .../eval_result_verifier.py | 29 +-
 compiler/luci-ref-value-py-test/conftest.py | 5 +-
 .../luci-ref-value-py-test/test_luci_eval.py | 11 +-
 compiler/luci-value-py-test/conftest.py | 17 +-
 compiler/luci-value-py-test/test_luci_eval.py | 73 +-
 .../luci-value-py-test/test_luci_eval_ref.py | 50 +-
 .../luci-value-test/luci_eval_verifier.py | 60 +-
 .../luci-value-test/luci_eval_verifier_ref.py | 50 +-
 .../luci/service/src/Nodes/CircleRange.cpp | 5 +
 .../nnc/utils/model_runner/common_place.py | 24 +-
 .../nnc/utils/prepare_inputs/jpeg2hdf5.py | 5 +-
 compiler/one-cmds/one-codegen | 15 +-
 compiler/one-cmds/one-create-quant-dataset | 20 +-
 compiler/one-cmds/one-import-bcq | 52 +-
 compiler/one-cmds/one-import-onnx | 61 +-
 compiler/one-cmds/one-import-pytorch | 49 +-
 compiler/one-cmds/one-import-tf | 99 +-
 compiler/one-cmds/one-import-tflite | 12 +-
 compiler/one-cmds/one-infer | 9 +-
 compiler/one-cmds/one-init | 27 +-
 compiler/one-cmds/one-optimize | 21 +-
 compiler/one-cmds/one-pack | 12 +-
 compiler/one-cmds/one-partition | 10 +-
 compiler/one-cmds/one-profile | 11 +-
 compiler/one-cmds/one-quantize | 103 +-
 compiler/one-cmds/onecc | 15 +-
 compiler/one-cmds/onelib/WorkflowRunner.py | 6 +-
 compiler/one-cmds/onelib/argumentparse.py | 4 +-
 compiler/one-cmds/onelib/backends.py | 4 +-
 compiler/one-cmds/onelib/export_constant.py | 14 +-
 compiler/one-cmds/onelib/utils.py | 36 +-
 compiler/one-cmds/onnx_legalizer.py | 123 +-
 .../tests/onnx_legalize_run_compare.py | 5 +-
 .../validate_onnx2circle.py | 17 +-
 compiler/onecc-docker/onecc-docker | 13 +-
 compiler/onnx-tools/onnx-dump.py | 4 +-
 .../compare_tensors_all.py | 33 +-
 .../gen_h5_explicit_inputs_all.py | 27 +-
 .../q_implant_validator.py | 9 +-
 .../gen_h5_random_inputs.py | 5 +-
 compiler/tf2tfliteV2/tf2tfliteV2.py | 99 +-
 compiler/visq-unittest/test/testPalette.py | 6 +-
 compiler/visq/visq | 121 +-
 compiler/visq/visqlib/DotBuilder.py | 9 +-
 infra/nnfw/python/setup.py | 5 +-
 .../examples/interpolate/__init__.py | 11 +-
 res/PyTorchExamples/ptem.py | 6 +-
 .../examples/conv2d_2/__init__.py | 8 +-
 .../examples/conv2d_transpose/__init__.py | 7 +-
 .../examples/depthwise_conv2d_1/__init__.py | 4 +-
 .../examples/depthwise_conv2d_2/__init__.py | 12 +-
 .../examples/fused_batch_norm/__init__.py | 8 +-
 .../examples/max_pool_with_argmax/__init__.py | 6 +-
 .../examples/one_hot/__init__.py | 7 +-
 .../examples/prelu/__init__.py | 5 +-
 .../examples/while_2/__init__.py | 6 +-
 .../examples/while_3/__init__.py | 6 +-
 .../examples/tconv-bn/__init__.py | 22 +-
 res/TensorFlowTests/NET_0003/test.py | 8 +-
 res/TensorFlowTests/NET_0004/test.py | 8 +-
 runtime/onert/api/python/package/infer.py | 9 +-
 .../merge_result_of_benchmark_nnpkg.py | 24 +-
 tools/circle_plus_gen/lib/json_parser.py | 4 +-
 tools/circle_plus_gen/main.py | 5 +-
 .../schema/circle_schema_generated.py | 1010 ++++++-----------
 .../schema/circle_traininfo_generated.py | 36 +-
 tools/extract_weights_from_tflite/extract.py | 5 +-
 .../tf_dataset_converter/argparser.py | 75 +-
 .../tf_dataset_converter/datasets.py | 3 +
 tools/kernel_report/kernel_report.py | 9 +-
 tools/model_partition_tool/Graph.py | 8 +-
 tools/model_partition_tool/test_partition.py | 10 +-
 tools/nnpackage_tool/gen_golden/gen_golden.py | 21 +-
 .../nnpackage_tool/model2nnpkg/model2nnpkg.py | 60 +-
 tools/nnpackage_tool/nnpackager/nnpackager.py | 15 +-
 tools/nnpackage_tool/qnf/qnf.py | 12 +-
 .../sth2nnpkgtc/pb_select_graph.py | 17 +-
 tools/pareto_profiler/estimator/Hlps.py | 7 +-
 .../estimator/brute_force_profiler.py | 4 +-
 .../pareto_profiler/estimator/hlps_sampler.py | 39 +-
 tools/pareto_profiler/estimator/pareto.py | 6 +-
 .../pareto_profiler/estimator/profile_args.py | 26 +-
 .../estimator/random_sampler.py | 6 +-
 tools/pareto_profiler/generator/gen_oplist.py | 28 +-
 tools/pbfile_tool/extract_subgraph.py | 7 +-
 tools/pbfile_tool/pb_info.py | 5 +-
 tools/stab/backend_profiler.py | 1 +
 tools/stab/backend_scheduler.py | 1 +
 tools/stab/nnpkg_helper.py | 1 +
 tools/stab/op_list_parser.py | 1 +
 tools/stab/remote.py | 1 +
 tools/stab/stab.py | 62 +-
 .../tensorflow_model_freezer/base_freezer.py | 38 +-
 .../model_freezer_util.py | 5 +-
 .../sample/ARGMAX_gen.py | 6 +-
 .../sample/ARGMIN_gen.py | 6 +-
 .../sample/DIV_gen.py | 6 +-
 .../sample/LOGICAL_AND_gen.py | 13 +-
 .../sample/LOGICAL_NOT_gen.py | 6 +-
 .../sample/LOGICAL_OR_gen.py | 13 +-
 .../sample/MUL_gen.py | 6 +-
 .../sample/Operation_gen.py | 20 +-
 .../sample/SQUEEZE_gen.py | 10 +-
 .../sample/STACK_gen.py | 6 +-
 .../sample/TOPK_gen.py | 6 +-
 .../sample/UNSTACK_gen.py | 6 +-
 tools/tflitefile_tool/model_parser.py | 41 +-
 .../parser/tflite/tflite_option.py | 5 +-
 .../tflitefile_tool/printer/string_builder.py | 15 +-
 tools/tflitefile_tool/select_operator.py | 51 +-
 .../tests/test_string_builder.py | 24 +-
 .../tests/test_tflite_parser.py | 4 +-
 tools/tflkit/summarize_pb.py | 28 +-
 120 files changed, 1565 insertions(+), 2015 deletions(-)

diff --git a/compiler/bcq-tools/generate_bcq_metadata.py b/compiler/bcq-tools/generate_bcq_metadata.py
index c728f3e1057..9281647e242 100644
--- a/compiler/bcq-tools/generate_bcq_metadata.py
+++ b/compiler/bcq-tools/generate_bcq_metadata.py
@@ -37,21 +37,24 @@ def _get_parser():
         description=("Command line tool to generate metadata of BCQ nodes"))
 
     # Input and output path.
- parser.add_argument("-i", - "--input_path", - type=str, - help="Full filepath of the input file.", - required=True) - parser.add_argument("-o", - "--output_path", - type=str, - help="Full filepath of the output file.", - required=True) - parser.add_argument("-O", - "--output_arrays", - type=str, - help="Original model output arrays", - required=True) + parser.add_argument( + "-i", + "--input_path", + type=str, + help="Full filepath of the input file.", + required=True) + parser.add_argument( + "-o", + "--output_path", + type=str, + help="Full filepath of the output file.", + required=True) + parser.add_argument( + "-O", + "--output_arrays", + type=str, + help="Original model output arrays", + required=True) return parser diff --git a/compiler/circle-part-value-py-test/conftest.py b/compiler/circle-part-value-py-test/conftest.py index 71ff7123fdf..63a78f4b510 100644 --- a/compiler/circle-part-value-py-test/conftest.py +++ b/compiler/circle-part-value-py-test/conftest.py @@ -10,9 +10,8 @@ def extract_test_args(s): def pytest_addoption(parser): parser.addoption("--test_list", action="store", help="Path to test list") parser.addoption("--bin_dir", action="store", help="Directory including artifacts") - parser.addoption("--circle_part_driver", - action="store", - help="Path to circle part driver") + parser.addoption( + "--circle_part_driver", action="store", help="Path to circle part driver") def pytest_generate_tests(metafunc): diff --git a/compiler/circle-part-value-py-test/test_circle_part_value.py b/compiler/circle-part-value-py-test/test_circle_part_value.py index 1f7a1a1c517..98524875fbe 100644 --- a/compiler/circle-part-value-py-test/test_circle_part_value.py +++ b/compiler/circle-part-value-py-test/test_circle_part_value.py @@ -57,20 +57,24 @@ def part_eval(test_name, bin_dir, circle_part_driver): for i in range(num_inputs): input_details = interpreter.get_input_details()[i] if input_details["dtype"] == np.float32: - input_data = np.array(np.random.random_sample(input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.random_sample(input_details["shape"]), input_details["dtype"]) elif input_details["dtype"] == np.uint8: - input_data = np.array(np.random.randint(0, 256, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 256, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.int16: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.int32: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.int64: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.bool_: input_data = np.array( np.random.choice(a=[True, False], size=input_details["shape"]), diff --git a/compiler/circle-part-value-test/part_eval_one.py b/compiler/circle-part-value-test/part_eval_one.py index ffbfd9c2c19..44661c78b4f 100755 --- a/compiler/circle-part-value-test/part_eval_one.py +++ 
b/compiler/circle-part-value-test/part_eval_one.py @@ -72,17 +72,18 @@ input_details_dtype = input_details["dtype"] input_details_shape = input_details["shape"] if input_details_dtype == np.float32: - input_data = np.array(np.random.random_sample(input_details_shape), - input_details_dtype) + input_data = np.array( + np.random.random_sample(input_details_shape), input_details_dtype) elif input_details_dtype == np.int16: - input_data = np.array(np.random.randint(0, 100, size=input_details_shape), - input_details_dtype) + input_data = np.array( + np.random.randint(0, 100, size=input_details_shape), input_details_dtype) elif input_details_dtype == np.uint8: - input_data = np.array(np.random.randint(0, 256, size=input_details_shape), - input_details_dtype) + input_data = np.array( + np.random.randint(0, 256, size=input_details_shape), input_details_dtype) elif input_details_dtype == np.bool_: - input_data = np.array(np.random.choice(a=[True, False], size=input_details_shape), - input_details_dtype) + input_data = np.array( + np.random.choice(a=[True, False], size=input_details_shape), + input_details_dtype) else: raise SystemExit("Unsupported input dtype") @@ -123,8 +124,8 @@ raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) elif output_dtype == np.float32: - if np.allclose(luci_output_data, intp_output_data, rtol=1.e-5, - atol=1.e-5) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=1.e-5, atol=1.e-5) == False: raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) elif output_dtype == np.int64: diff --git a/compiler/fm-equalize/fm-equalize b/compiler/fm-equalize/fm-equalize index 36b4f99a003..c414a48dc72 100644 --- a/compiler/fm-equalize/fm-equalize +++ b/compiler/fm-equalize/fm-equalize @@ -33,16 +33,14 @@ from pathlib import Path def _get_parser(): parser = argparse.ArgumentParser( description='Command line tool to equalize feature map (FM) value distribution') - parser.add_argument("-i", - "--input", - type=str, - help="Path to the input circle model.", - required=True) - parser.add_argument("-o", - "--output", - type=str, - help="Path to the output circle model.", - required=True) + parser.add_argument( + "-i", "--input", type=str, help="Path to the input circle model.", required=True) + parser.add_argument( + "-o", + "--output", + type=str, + help="Path to the output circle model.", + required=True) parser.add_argument( "-f", "--fme_patterns", @@ -62,18 +60,12 @@ def _get_parser(): help="Allow to create duplicate operations when a feature map matches " "with multiple equalization patterns. This can increase the size of " "the model. Default is false.") - parser.add_argument("--fme_detect", - type=str, - help="Path to fme-detect driver.", - required=False) - parser.add_argument("--dalgona", - type=str, - help="Path to dalgona driver.", - required=False) - parser.add_argument("--fme_apply", - type=str, - help="Path to fme-apply driver.", - required=False) + parser.add_argument( + "--fme_detect", type=str, help="Path to fme-detect driver.", required=False) + parser.add_argument( + "--dalgona", type=str, help="Path to dalgona driver.", required=False) + parser.add_argument( + "--fme_apply", type=str, help="Path to fme-apply driver.", required=False) parser.add_argument('--verbose', action='store_true', help='Print logs') return parser @@ -160,23 +152,25 @@ def main(): Path(output_model).with_suffix('.fme_patterns.json').name) # Step 1. 
Run fme-detect to find equalization patterns - _run_fme_detect(fme_detect_path, - str(input_model), - str(fme_patterns), - verbose=verbose, - allow_dup_op=allow_dup_op) + _run_fme_detect( + fme_detect_path, + str(input_model), + str(fme_patterns), + verbose=verbose, + allow_dup_op=allow_dup_op) # Copy fme_patterns to the given path if args.fme_patterns != None: os.system(f'cp {fme_patterns} {args.fme_patterns}') # Step 2. Run dalgona - _run_dalgona(dalgona_path, - str(input_model), - data, - str(dump_fme_param_py), - str(fme_patterns), - verbose=verbose) + _run_dalgona( + dalgona_path, + str(input_model), + data, + str(dump_fme_param_py), + str(fme_patterns), + verbose=verbose) # Copy fme_patterns to the given path # Why copy twice? To observe the result of fme-detect too @@ -184,11 +178,12 @@ def main(): os.system(f'cp {fme_patterns} {args.fme_patterns}') # Step 3. Run fme-apply - _run_fme_apply(fme_apply_path, - str(input_model), - str(fme_patterns), - str(output_model), - verbose=verbose) + _run_fme_apply( + fme_apply_path, + str(input_model), + str(fme_patterns), + str(output_model), + verbose=verbose) if __name__ == '__main__': diff --git a/compiler/luci-pass-value-py-test/conftest.py b/compiler/luci-pass-value-py-test/conftest.py index be4e65432b0..be8fa3b79f4 100644 --- a/compiler/luci-pass-value-py-test/conftest.py +++ b/compiler/luci-pass-value-py-test/conftest.py @@ -9,15 +9,12 @@ def extract_test_args(s): def pytest_addoption(parser): parser.addoption("--test_list", action="store", help="Path to test list") - parser.addoption("--tflite_dir", - action="store", - help="Directory including tflite file") - parser.addoption("--circle_dir", - action="store", - help="Directory including circle file") - parser.addoption("--luci_eval_driver", - action="store", - help="Path to luci eval driver") + parser.addoption( + "--tflite_dir", action="store", help="Directory including tflite file") + parser.addoption( + "--circle_dir", action="store", help="Directory including circle file") + parser.addoption( + "--luci_eval_driver", action="store", help="Path to luci eval driver") def pytest_generate_tests(metafunc): diff --git a/compiler/luci-pass-value-py-test/test_luci_eval.py b/compiler/luci-pass-value-py-test/test_luci_eval.py index b5397d851fd..4cb59c177b7 100644 --- a/compiler/luci-pass-value-py-test/test_luci_eval.py +++ b/compiler/luci-pass-value-py-test/test_luci_eval.py @@ -38,20 +38,24 @@ def luci_eval_verify(test_name, for i in range(num_inputs): input_details = interpreter.get_input_details()[i] if input_details["dtype"] == np.float32: - input_data = np.array(np.random.random_sample(input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.random_sample(input_details["shape"]), input_details["dtype"]) elif input_details["dtype"] == np.uint8: - input_data = np.array(np.random.randint(0, 256, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 256, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.int16: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.int32: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) 
elif input_details["dtype"] == np.int64: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.bool_: input_data = np.array( np.random.choice(a=[True, False], size=input_details["shape"]), @@ -66,11 +70,12 @@ def luci_eval_verify(test_name, interpreter.invoke() # Execute luci interpreter. - subprocess.run([ - eval_driver, circle_model, - str(num_inputs), circle_model + ".input", circle_model + ".output" - ], - check=True) + subprocess.run( + [ + eval_driver, circle_model, + str(num_inputs), circle_model + ".input", circle_model + ".output" + ], + check=True) # Compare the results. inpt_output_details = interpreter.get_output_details() @@ -87,33 +92,23 @@ def luci_eval_verify(test_name, intp_output_data = interpreter.get_tensor(output_tensor) err_msg = "Execution result of " + tflite_model + " does not match with " + circle_model if output_details["dtype"] == np.uint8: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg elif output_details["dtype"] == np.float32: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolf32, - atol=atolf32), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolf32, atol=atolf32), err_msg elif output_details["dtype"] == np.int64: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg elif output_details["dtype"] == np.int32: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg elif output_details["dtype"] == np.int16: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg elif output_details["dtype"] == np.bool_: - assert np.allclose(luci_output_data, intp_output_data, rtol=0, - atol=0), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=0, atol=0), err_msg else: assert False, "Unsupported data type: " + output_details["dtype"] diff --git a/compiler/luci-pass-value-test/eval_result_verifier.py b/compiler/luci-pass-value-test/eval_result_verifier.py index ea141208dcc..0073c4db5aa 100644 --- a/compiler/luci-pass-value-test/eval_result_verifier.py +++ b/compiler/luci-pass-value-test/eval_result_verifier.py @@ -39,14 +39,16 @@ for i in range(num_inputs): input_details = interpreter.get_input_details()[i] if input_details["dtype"] == np.float32: - input_data = np.array(np.random.random_sample(input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.random_sample(input_details["shape"]), input_details["dtype"]) elif input_details["dtype"] == np.uint8: - input_data = np.array(np.random.randint(0, 256, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 256, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.int16: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = 
np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) elif input_details["dtype"] == np.bool_: input_data = np.array( np.random.choice(a=[True, False], size=input_details["shape"]), @@ -61,11 +63,12 @@ interpreter.invoke() # Execute luci interpreter. -subprocess.run([ - driver, circle_model, - str(num_inputs), circle_model + ".input", circle_model + ".output" -], - check=True) +subprocess.run( + [ + driver, circle_model, + str(num_inputs), circle_model + ".input", circle_model + ".output" + ], + check=True) # Compare the results. inpt_output_details = interpreter.get_output_details() @@ -86,8 +89,8 @@ raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) elif output_details["dtype"] == np.float32: - if np.allclose(luci_output_data, intp_output_data, rtol=1.e-5, - atol=1.e-5) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=1.e-5, atol=1.e-5) == False: raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) elif output_details["dtype"] == np.int64: diff --git a/compiler/luci-ref-value-py-test/conftest.py b/compiler/luci-ref-value-py-test/conftest.py index f535356d8eb..b7d46902cb2 100644 --- a/compiler/luci-ref-value-py-test/conftest.py +++ b/compiler/luci-ref-value-py-test/conftest.py @@ -15,9 +15,8 @@ def pytest_addoption(parser): parser.addoption("--tflrecipe", action="store", help="Path to tfl recipies") parser.addoption("--circlerecipe", action="store", help="Path to circle recipies") parser.addoption("--binary", action="store", help="Path to test binary") - parser.addoption("--luci_eval_driver", - action="store", - help="Path to luci eval driver") + parser.addoption( + "--luci_eval_driver", action="store", help="Path to luci eval driver") def copy_if_changed(src_filepath, dst_filepath): diff --git a/compiler/luci-ref-value-py-test/test_luci_eval.py b/compiler/luci-ref-value-py-test/test_luci_eval.py index ef8b6c19aa4..cbe991d462e 100644 --- a/compiler/luci-ref-value-py-test/test_luci_eval.py +++ b/compiler/luci-ref-value-py-test/test_luci_eval.py @@ -89,11 +89,12 @@ def luci_eval_verify(test_name, binary_path, eval_driver, rtolf32=1e-5, atolf32= assert num_ouputs > 0, "No valid reference output file" # Execute luci interpreter. - subprocess.run([ - eval_driver, circle_model, - str(num_inputs), circle_model + ".input", circle_model + ".output" - ], - check=True) + subprocess.run( + [ + eval_driver, circle_model, + str(num_inputs), circle_model + ".input", circle_model + ".output" + ], + check=True) # Compare the results. 
for idx in range(num_ouputs): diff --git a/compiler/luci-value-py-test/conftest.py b/compiler/luci-value-py-test/conftest.py index 4e4bf111066..042a265aa83 100644 --- a/compiler/luci-value-py-test/conftest.py +++ b/compiler/luci-value-py-test/conftest.py @@ -10,12 +10,10 @@ def extract_test_args(s): def pytest_addoption(parser): parser.addoption("--test_list", action="store", help="Path to test list") parser.addoption("--artifacts", action="store", help="Path to test artifacts") - parser.addoption("--target_artifacts", - action="store", - help="Path to test target artifacts") - parser.addoption("--luci_eval_driver", - action="store", - help="Path to luci eval driver") + parser.addoption( + "--target_artifacts", action="store", help="Path to test target artifacts") + parser.addoption( + "--luci_eval_driver", action="store", help="Path to luci eval driver") def pytest_generate_tests(metafunc): @@ -58,9 +56,10 @@ def pytest_generate_tests(metafunc): eval_driver_path) for arg in test_args if len(arg.split()) == 1] # eval(TEST_NAME RTOL ATOL) - ref_tests_with_tol = [(arg.split()[0], artifacts_path, target_artifacts_path, - eval_driver_path, arg.split()[1], arg.split()[2]) - for arg in test_args if len(arg.split()) == 3] + ref_tests_with_tol = [(arg.split()[0], artifacts_path, + target_artifacts_path, eval_driver_path, arg.split()[1], + arg.split()[2]) for arg in test_args + if len(arg.split()) == 3] # # for cross platform test # diff --git a/compiler/luci-value-py-test/test_luci_eval.py b/compiler/luci-value-py-test/test_luci_eval.py index 8998ee55e1d..b3fa4422b52 100644 --- a/compiler/luci-value-py-test/test_luci_eval.py +++ b/compiler/luci-value-py-test/test_luci_eval.py @@ -33,24 +33,28 @@ def luci_eval_verify(test_name, artifacts, eval_driver, rtolf32=1e-5, atolf32=1e for i in range(num_inputs): input_details = interpreter.get_input_details()[i] if input_details["dtype"] == np.float32: - input_data = np.array(np.random.random_sample(input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.random_sample(input_details["shape"]), input_details["dtype"]) input_dtype = "float32" elif input_details["dtype"] == np.uint8: - input_data = np.array(np.random.randint(0, 256, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 256, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "uint8" elif input_details["dtype"] == np.int16: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "int16" elif input_details["dtype"] == np.int32: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "int32" elif input_details["dtype"] == np.int64: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "int64" elif input_details["dtype"] == np.bool_: input_data = np.array( @@ -62,8 +66,8 @@ def luci_eval_verify(test_name, artifacts, eval_driver, rtolf32=1e-5, atolf32=1e interpreter.set_tensor(input_details["index"], input_data) input_data.tofile(circle_model + ".input" + str(i)) - 
input_details["shape"].tofile(circle_model + ".input" + str(i) + ".shape", - sep=',') + input_details["shape"].tofile( + circle_model + ".input" + str(i) + ".shape", sep=',') with open(circle_model + ".input" + str(i) + ".dtype", 'w') as dtype_file: dtype_file.write(input_dtype) @@ -71,11 +75,12 @@ def luci_eval_verify(test_name, artifacts, eval_driver, rtolf32=1e-5, atolf32=1e interpreter.invoke() # Execute luci interpreter. - subprocess.run([ - eval_driver, circle_model, - str(num_inputs), circle_model + ".input", circle_model + ".output" - ], - check=True) + subprocess.run( + [ + eval_driver, circle_model, + str(num_inputs), circle_model + ".input", circle_model + ".output" + ], + check=True) # Compare the results. inpt_output_details = interpreter.get_output_details() @@ -92,38 +97,28 @@ def luci_eval_verify(test_name, artifacts, eval_driver, rtolf32=1e-5, atolf32=1e intp_output_data = interpreter.get_tensor(output_tensor) err_msg = "Execution result of " + tflite_model + " does not match with " + circle_model if output_details["dtype"] == np.uint8: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg output_dtype = "uint8" elif output_details["dtype"] == np.float32: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolf32, - atol=atolf32), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolf32, atol=atolf32), err_msg output_dtype = "float32" elif output_details["dtype"] == np.int64: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg output_dtype = "int64" elif output_details["dtype"] == np.int32: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg output_dtype = "int32" elif output_details["dtype"] == np.int16: - assert np.allclose(luci_output_data, - intp_output_data, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, atol=atolint), err_msg output_dtype = "int16" elif output_details["dtype"] == np.bool_: - assert np.allclose(luci_output_data, intp_output_data, rtol=0, - atol=0), err_msg + assert np.allclose( + luci_output_data, intp_output_data, rtol=0, atol=0), err_msg output_dtype = "bool" else: assert False, "Unsupported data type: " + output_details["dtype"] diff --git a/compiler/luci-value-py-test/test_luci_eval_ref.py b/compiler/luci-value-py-test/test_luci_eval_ref.py index 2c8c2af7b0d..f476c78fa28 100644 --- a/compiler/luci-value-py-test/test_luci_eval_ref.py +++ b/compiler/luci-value-py-test/test_luci_eval_ref.py @@ -74,11 +74,12 @@ def luci_eval_verify_ref(test_name, assert num_outputs != 0, "output file not exist for " + circle_model_ref # Execute luci interpreter with reference input - subprocess.run([ - eval_driver, circle_model_ref, - str(num_inputs), circle_model_ref + ".input", circle_model + ".output" - ], - check=True) + subprocess.run( + [ + eval_driver, circle_model_ref, + str(num_inputs), circle_model_ref + ".input", circle_model + ".output" + ], + check=True) # Compare the results. 
for idx in range(num_outputs): @@ -95,33 +96,28 @@ def luci_eval_verify_ref(test_name, err_msg = "Execution result of " + circle_model_ref + " does not match with " + circle_model if output_dtype == np.uint8: - assert np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint), err_msg elif output_dtype == np.float32: - assert np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolf32, - atol=atolf32), err_msg + assert np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolf32, + atol=atolf32), err_msg elif output_dtype == np.int64: - assert np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint), err_msg elif output_dtype == np.int32: - assert np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint), err_msg elif output_dtype == np.int16: - assert np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint), err_msg + assert np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint), err_msg elif output_dtype == np.bool_: - assert np.allclose(luci_output_data, luci_output_data_ref, rtol=0, - atol=0), err_msg + assert np.allclose( + luci_output_data, luci_output_data_ref, rtol=0, atol=0), err_msg else: assert False, "Unsupported data type: " + output_dtype diff --git a/compiler/luci-value-test/luci_eval_verifier.py b/compiler/luci-value-test/luci_eval_verifier.py index e934260b869..f74b2201ff2 100755 --- a/compiler/luci-value-test/luci_eval_verifier.py +++ b/compiler/luci-value-test/luci_eval_verifier.py @@ -59,24 +59,28 @@ for i in range(num_inputs): input_details = interpreter.get_input_details()[i] if input_details["dtype"] == np.float32: - input_data = np.array(np.random.random_sample(input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.random_sample(input_details["shape"]), input_details["dtype"]) input_dtype = "float32" elif input_details["dtype"] == np.uint8: - input_data = np.array(np.random.randint(0, 256, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 256, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "uint8" elif input_details["dtype"] == np.int16: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "int16" elif input_details["dtype"] == np.int32: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "int32" elif input_details["dtype"] == np.int64: - input_data = np.array(np.random.randint(0, 100, size=input_details["shape"]), - input_details["dtype"]) + input_data = np.array( + np.random.randint(0, 100, size=input_details["shape"]), + input_details["dtype"]) input_dtype = "int64" elif input_details["dtype"] == np.bool_: input_data = np.array( @@ -96,11 +100,12 @@ interpreter.invoke() # Execute luci interpreter. 
-subprocess.run([ - driver, circle_model, - str(num_inputs), circle_model + ".input", circle_model + ".output" -], - check=True) +subprocess.run( + [ + driver, circle_model, + str(num_inputs), circle_model + ".input", circle_model + ".output" + ], + check=True) # Compare the results. inpt_output_details = interpreter.get_output_details() @@ -117,40 +122,45 @@ intp_output_data = interpreter.get_tensor(output_tensor) try: if output_details["dtype"] == np.uint8: - if np.allclose(luci_output_data, intp_output_data, rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, + atol=atolint) == False: print("intp_output_data", intp_output_data) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) output_dtype = "uint8" elif output_details["dtype"] == np.float32: - if np.allclose(luci_output_data, intp_output_data, rtol=rtolf32, - atol=atolf32) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=rtolf32, + atol=atolf32) == False: print("intp_output_data", intp_output_data) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) output_dtype = "float32" elif output_details["dtype"] == np.int64: - if np.allclose(luci_output_data, intp_output_data, rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, + atol=atolint) == False: print("intp_output_data", intp_output_data) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) output_dtype = "int64" elif output_details["dtype"] == np.int32: - if np.allclose(luci_output_data, intp_output_data, rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, + atol=atolint) == False: print("intp_output_data", intp_output_data) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + tflite_model + " does not match with " + circle_model) output_dtype = "int32" elif output_details["dtype"] == np.int16: - if np.allclose(luci_output_data, intp_output_data, rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, intp_output_data, rtol=rtolint, + atol=atolint) == False: print("intp_output_data", intp_output_data) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + tflite_model + diff --git a/compiler/luci-value-test/luci_eval_verifier_ref.py b/compiler/luci-value-test/luci_eval_verifier_ref.py index 96ec2486ce9..3e4d93855ee 100755 --- a/compiler/luci-value-test/luci_eval_verifier_ref.py +++ b/compiler/luci-value-test/luci_eval_verifier_ref.py @@ -96,11 +96,12 @@ def dtype_from_file(file_path): quit(128) # Execute luci interpreter with reference input -subprocess.run([ - driver, circle_model_ref, - str(num_inputs), circle_model_ref + ".input", circle_model + ".output" -], - check=True) +subprocess.run( + [ + driver, circle_model_ref, + str(num_inputs), circle_model_ref + ".input", circle_model + ".output" + ], + check=True) # Compare the results. 
for idx in range(num_outputs): @@ -116,53 +117,48 @@ def dtype_from_file(file_path): try: if output_dtype == np.uint8: - if np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint) == False: print("luci_output_data_ref", luci_output_data_ref) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + circle_model_ref + " does not match with " + circle_model) elif output_dtype == np.float32: - if np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolf32, - atol=atolf32) == False: + if np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolf32, + atol=atolf32) == False: print("luci_output_data_ref", luci_output_data_ref) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + circle_model_ref + " does not match with " + circle_model) elif output_dtype == np.int64: - if np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint) == False: print("luci_output_data_ref", luci_output_data_ref) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + circle_model_ref + " does not match with " + circle_model) elif output_dtype == np.int32: - if np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint) == False: print("luci_output_data_ref", luci_output_data_ref) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + circle_model_ref + " does not match with " + circle_model) elif output_dtype == np.int16: - if np.allclose(luci_output_data, - luci_output_data_ref, - rtol=rtolint, - atol=atolint) == False: + if np.allclose( + luci_output_data, luci_output_data_ref, rtol=rtolint, + atol=atolint) == False: print("luci_output_data_ref", luci_output_data_ref) print("luci_output_data", luci_output_data) raise SystemExit("Execution result of " + circle_model_ref + " does not match with " + circle_model) elif output_dtype == np.bool_: - if np.allclose(luci_output_data, luci_output_data_ref, rtol=0, - atol=0) == False: + if np.allclose( + luci_output_data, luci_output_data_ref, rtol=0, atol=0) == False: raise SystemExit("Execution result of " + circle_model_ref + " does not match with " + circle_model) else: diff --git a/compiler/luci/service/src/Nodes/CircleRange.cpp b/compiler/luci/service/src/Nodes/CircleRange.cpp index ba8f45579c7..a66358ec11e 100644 --- a/compiler/luci/service/src/Nodes/CircleRange.cpp +++ b/compiler/luci/service/src/Nodes/CircleRange.cpp @@ -14,7 +14,12 @@ * limitations under the License. 
*/ +#include "luci/Service/CircleShapeInference.h" + #include "CircleCloneNode.h" +#include "CircleShapeInferenceHelper.h" + +#include namespace luci { diff --git a/compiler/nnc/utils/model_runner/common_place.py b/compiler/nnc/utils/model_runner/common_place.py index c184a35a743..eb89534555e 100755 --- a/compiler/nnc/utils/model_runner/common_place.py +++ b/compiler/nnc/utils/model_runner/common_place.py @@ -11,17 +11,19 @@ def regular_step(): """ parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter) - parser.add_argument('-m', - '--model', - help=("specify input file with NN model, \n[depends from model, " - " two for caffe and caffe2, one for onnx and tflite]"), - nargs='+') - parser.add_argument('-i', - '--input', - help=(" specify file with neural" - " network input data, hdf5 for caffe caffe2 tflite " - "and pb for onnx"), - required=True) + parser.add_argument( + '-m', + '--model', + help=("specify input file with NN model, \n[depends from model, " + " two for caffe and caffe2, one for onnx and tflite]"), + nargs='+') + parser.add_argument( + '-i', + '--input', + help=(" specify file with neural" + " network input data, hdf5 for caffe caffe2 tflite " + "and pb for onnx"), + required=True) parser.add_argument( '-o', '--output_path', diff --git a/compiler/nnc/utils/prepare_inputs/jpeg2hdf5.py b/compiler/nnc/utils/prepare_inputs/jpeg2hdf5.py index 3d11af02290..54f12b06230 100755 --- a/compiler/nnc/utils/prepare_inputs/jpeg2hdf5.py +++ b/compiler/nnc/utils/prepare_inputs/jpeg2hdf5.py @@ -131,9 +131,8 @@ narr = np.append(narr, mixed_ch) elif fw == 'caf': dset_shape = (1, 3, h, w) - narr = np.fromstring(rfb + gfb + bfb, - count=(3 * h * w), - dtype='float32') + narr = np.fromstring( + rfb + gfb + bfb, count=(3 * h * w), dtype='float32') for i in range(3 * h * w): narr[i] /= 255.0 if remove_existing: diff --git a/compiler/one-cmds/one-codegen b/compiler/one-cmds/one-codegen index 8551056f6a8..1081146e233 100644 --- a/compiler/one-cmds/one-codegen +++ b/compiler/one-cmds/one-codegen @@ -38,8 +38,8 @@ sys.tracebacklimit = 0 def _get_parser(backends_list): codegen_usage = 'one-codegen [-h] [-v] [-C CONFIG] [-b BACKEND | -T TARGET] [--] [COMMANDS FOR BACKEND]' - parser = argparse.ArgumentParser(description='command line tool for code generation', - usage=codegen_usage) + parser = argparse.ArgumentParser( + description='command line tool for code generation', usage=codegen_usage) oneutils.add_default_arg(parser) @@ -52,10 +52,8 @@ def _get_parser(backends_list): backends_name) + ')' backend_help_message = 'backend name to use ' + backends_name_message parser.add_argument('-b', '--backend', type=str, help=backend_help_message) - parser.add_argument('-T', - '--target', - type=str, - help='run with specific target of the backend') + parser.add_argument( + '-T', '--target', type=str, help='run with specific target of the backend') return parser @@ -218,9 +216,8 @@ def main(): # check if command schema for the backend exists # 1. if it exists, run the command according to the schema. # 2. if it doesn't exist, insert "--target ${TARGET}" at the beginning of the given command. 
- parser = oneutils.get_arg_parser(given_backend, - cmd="codegen", - target=target_to_run) + parser = oneutils.get_arg_parser( + given_backend, cmd="codegen", target=target_to_run) # [11], [13] if target_to_run and parser: if oneutils.is_valid_attr(cfg_args, 'command'): diff --git a/compiler/one-cmds/one-create-quant-dataset b/compiler/one-cmds/one-create-quant-dataset index 8262b9efde6..00a32441ef7 100644 --- a/compiler/one-cmds/one-create-quant-dataset +++ b/compiler/one-cmds/one-create-quant-dataset @@ -31,12 +31,13 @@ def get_parser(): """Create and return given the argument parser""" parser = argparse.ArgumentParser( description='command line tool to convert data files to hdf5 file') - parser.add_argument("-i", - "--input_data_format", - type=str, - help="Input data format of either rawdata or numpy", - choices=['rawdata', 'numpy'], - required=True) + parser.add_argument( + "-i", + "--input_data_format", + type=str, + help="Input data format of either rawdata or numpy", + choices=['rawdata', 'numpy'], + required=True) parser.add_argument( "-l", "--data_list", @@ -46,11 +47,8 @@ def get_parser(): "For a multi-input model, input data files for the same inference call " + \ "have to be separated by space in the same line.", required=True) - parser.add_argument("-p", - "--output_path", - type=str, - help="Path to the output file.", - required=True) + parser.add_argument( + "-p", "--output_path", type=str, help="Path to the output file.", required=True) return parser diff --git a/compiler/one-cmds/one-import-bcq b/compiler/one-cmds/one-import-bcq index 20e236fea3c..fc0f75cc8dd 100644 --- a/compiler/one-cmds/one-import-bcq +++ b/compiler/one-cmds/one-import-bcq @@ -47,34 +47,33 @@ def _get_parser(): # converter version converter_version = tf2tfliteV2_group.add_mutually_exclusive_group() - converter_version.add_argument('--v1', - action='store_const', - dest='converter_version_cmd', - const='--v1', - help='use TensorFlow Lite Converter 1.x') - converter_version.add_argument('--v2', - action='store_const', - dest='converter_version_cmd', - const='--v2', - help='use TensorFlow Lite Converter 2.x') + converter_version.add_argument( + '--v1', + action='store_const', + dest='converter_version_cmd', + const='--v1', + help='use TensorFlow Lite Converter 1.x') + converter_version.add_argument( + '--v2', + action='store_const', + dest='converter_version_cmd', + const='--v2', + help='use TensorFlow Lite Converter 2.x') parser.add_argument('--converter_version', type=str, help=argparse.SUPPRESS) # input and output path. - tf2tfliteV2_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - tf2tfliteV2_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + tf2tfliteV2_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + tf2tfliteV2_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') # input and output arrays. 
- tf2tfliteV2_group.add_argument('-I', - '--input_arrays', - type=str, - help='names of the input arrays, comma-separated') + tf2tfliteV2_group.add_argument( + '-I', + '--input_arrays', + type=str, + help='names of the input arrays, comma-separated') tf2tfliteV2_group.add_argument( '-s', '--input_shapes', @@ -82,10 +81,11 @@ def _get_parser(): help= 'shapes corresponding to --input_arrays, colon-separated (ex:"1,4,4,3:1,20,20,3")' ) - tf2tfliteV2_group.add_argument('-O', - '--output_arrays', - type=str, - help='names of the output arrays, comma-separated') + tf2tfliteV2_group.add_argument( + '-O', + '--output_arrays', + type=str, + help='names of the output arrays, comma-separated') return parser diff --git a/compiler/one-cmds/one-import-onnx b/compiler/one-cmds/one-import-onnx index 4a1960e332c..dddaf089981 100644 --- a/compiler/one-cmds/one-import-onnx +++ b/compiler/one-cmds/one-import-onnx @@ -138,33 +138,30 @@ def _get_parser(): tf2tfliteV2_group = parser.add_argument_group('converter arguments') # input and output path. - tf2tfliteV2_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - tf2tfliteV2_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + tf2tfliteV2_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + tf2tfliteV2_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') # input and output arrays. - tf2tfliteV2_group.add_argument('-I', - '--input_arrays', - type=str, - help='names of the input arrays, comma-separated') - tf2tfliteV2_group.add_argument('-O', - '--output_arrays', - type=str, - help='names of the output arrays, comma-separated') + tf2tfliteV2_group.add_argument( + '-I', + '--input_arrays', + type=str, + help='names of the input arrays, comma-separated') + tf2tfliteV2_group.add_argument( + '-O', + '--output_arrays', + type=str, + help='names of the output arrays, comma-separated') # fixed options tf2tfliteV2_group.add_argument('--model_format', default='saved_model') tf2tfliteV2_group.add_argument('--converter_version', default='v2') parser.add_argument('--unroll_rnn', action='store_true', help='Unroll RNN operators') - parser.add_argument('--unroll_lstm', - action='store_true', - help='Unroll LSTM operators') + parser.add_argument( + '--unroll_lstm', action='store_true', help='Unroll LSTM operators') parser.add_argument( '--keep_io_order', action='store_true', @@ -191,19 +188,22 @@ def _get_parser(): help='Use one-import-onnx-ext in first attempt and skip default tool') # do not call one-import-onnx-ext when default one-import-onnx fails - use_extension.add_argument('--disable_ext', - action='store_true', - help='Disable one-import-onnx-ext for second attempt') + use_extension.add_argument( + '--disable_ext', + action='store_true', + help='Disable one-import-onnx-ext for second attempt') # save intermediate file(s) - parser.add_argument('--save_intermediate', - action='store_true', - help='Save intermediate files to output folder') + parser.add_argument( + '--save_intermediate', + action='store_true', + help='Save intermediate files to output folder') # experimental options - parser.add_argument('--experimental_disable_batchmatmul_unfold', - action='store_true', - help='Experimental disable BatchMatMul unfold') + parser.add_argument( + '--experimental_disable_batchmatmul_unfold', + action='store_true', + help='Experimental disable BatchMatMul unfold') return parser @@ -372,9 +372,8 @@ 
def _convert(args): f.write((' '.join(tflite2circle_cmd) + '\n').encode()) # convert tflite to circle - res_conv = oneutils.run_ret(tflite2circle_cmd, - err_prefix="tflite2circle", - logfile=f) + res_conv = oneutils.run_ret( + tflite2circle_cmd, err_prefix="tflite2circle", logfile=f) except: res_conv = -1 diff --git a/compiler/one-cmds/one-import-pytorch b/compiler/one-cmds/one-import-pytorch index be941f891da..5939aa0b8dc 100644 --- a/compiler/one-cmds/one-import-pytorch +++ b/compiler/one-cmds/one-import-pytorch @@ -54,18 +54,12 @@ def _get_parser(): converter_group = parser.add_argument_group('converter arguments') # input and output path. - converter_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - converter_group.add_argument('-p', - '--python_path', - type=str, - help='full filepath of the python model file') - converter_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + converter_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + converter_group.add_argument( + '-p', '--python_path', type=str, help='full filepath of the python model file') + converter_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') # input arrays. converter_group.add_argument( @@ -87,14 +81,14 @@ def _get_parser(): tf2tflite_group.add_argument('--converter_version', default='v2') parser.add_argument('--unroll_rnn', action='store_true', help='Unroll RNN operators') - parser.add_argument('--unroll_lstm', - action='store_true', - help='Unroll LSTM operators') + parser.add_argument( + '--unroll_lstm', action='store_true', help='Unroll LSTM operators') # save intermediate file(s) - parser.add_argument('--save_intermediate', - action='store_true', - help='Save intermediate files to output folder') + parser.add_argument( + '--save_intermediate', + action='store_true', + help='Save intermediate files to output folder') return parser @@ -181,8 +175,7 @@ def _merge_module(module): def _list_classes_from_module(module): # Parsing the module to get all defined classes - is_member = lambda member: inspect.isclass(member - ) and member.__module__ == module.__name__ + is_member = lambda member: inspect.isclass(member) and member.__module__ == module.__name__ classes = [cls[1] for cls in inspect.getmembers(module, is_member)] return classes @@ -300,11 +293,12 @@ def _convert(args): f.write(('Trying to save onnx model using opset version ' + str(onnx_opset_version) + '\n').encode()) try: - torch.onnx.export(pytorch_model, - tuple(sample_inputs), - onnx_output_path, - example_outputs=sample_outputs, - opset_version=onnx_opset_version) + torch.onnx.export( + pytorch_model, + tuple(sample_inputs), + onnx_output_path, + example_outputs=sample_outputs, + opset_version=onnx_opset_version) onnx_saved = True break except: @@ -335,9 +329,8 @@ def _convert(args): tf2tfliteV2_output_path = os.path.join(tmpdir, tf2tfliteV2_output_name) del args.input_shapes - tf2tfliteV2_cmd = _make_cmd.make_tf2tfliteV2_cmd(args, tf2tfliteV2_path, - savedmodel_output_path, - tf2tfliteV2_output_path) + tf2tfliteV2_cmd = _make_cmd.make_tf2tfliteV2_cmd( + args, tf2tfliteV2_path, savedmodel_output_path, tf2tfliteV2_output_path) f.write((' '.join(tf2tfliteV2_cmd) + '\n').encode()) diff --git a/compiler/one-cmds/one-import-tf b/compiler/one-cmds/one-import-tf index 6b8d3841dd6..75d19680dab 100644 --- a/compiler/one-cmds/one-import-tf +++ b/compiler/one-cmds/one-import-tf @@ 
-42,54 +42,56 @@ def _get_parser(): # converter version converter_version = tf2tfliteV2_group.add_mutually_exclusive_group() - converter_version.add_argument('--v1', - action='store_const', - dest='converter_version_cmd', - const='--v1', - help='use TensorFlow Lite Converter 1.x') - converter_version.add_argument('--v2', - action='store_const', - dest='converter_version_cmd', - const='--v2', - help='use TensorFlow Lite Converter 2.x') + converter_version.add_argument( + '--v1', + action='store_const', + dest='converter_version_cmd', + const='--v1', + help='use TensorFlow Lite Converter 1.x') + converter_version.add_argument( + '--v2', + action='store_const', + dest='converter_version_cmd', + const='--v2', + help='use TensorFlow Lite Converter 2.x') parser.add_argument('--converter_version', type=str, help=argparse.SUPPRESS) # input model format model_format_arg = tf2tfliteV2_group.add_mutually_exclusive_group() - model_format_arg.add_argument('--graph_def', - action='store_const', - dest='model_format_cmd', - const='--graph_def', - help='use graph def file(default)') - model_format_arg.add_argument('--saved_model', - action='store_const', - dest='model_format_cmd', - const='--saved_model', - help='use saved model') - model_format_arg.add_argument('--keras_model', - action='store_const', - dest='model_format_cmd', - const='--keras_model', - help='use keras model') + model_format_arg.add_argument( + '--graph_def', + action='store_const', + dest='model_format_cmd', + const='--graph_def', + help='use graph def file(default)') + model_format_arg.add_argument( + '--saved_model', + action='store_const', + dest='model_format_cmd', + const='--saved_model', + help='use saved model') + model_format_arg.add_argument( + '--keras_model', + action='store_const', + dest='model_format_cmd', + const='--keras_model', + help='use keras model') parser.add_argument('--model_format', type=str, help=argparse.SUPPRESS) # input and output path. - tf2tfliteV2_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - tf2tfliteV2_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + tf2tfliteV2_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + tf2tfliteV2_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') # input and output arrays. 
- tf2tfliteV2_group.add_argument('-I', - '--input_arrays', - type=str, - help='names of the input arrays, comma-separated') + tf2tfliteV2_group.add_argument( + '-I', + '--input_arrays', + type=str, + help='names of the input arrays, comma-separated') tf2tfliteV2_group.add_argument( '-s', '--input_shapes', @@ -97,20 +99,23 @@ def _get_parser(): help= 'shapes corresponding to --input_arrays, colon-separated (ex:"1,4,4,3:1,20,20,3")' ) - tf2tfliteV2_group.add_argument('-O', - '--output_arrays', - type=str, - help='names of the output arrays, comma-separated') + tf2tfliteV2_group.add_argument( + '-O', + '--output_arrays', + type=str, + help='names of the output arrays, comma-separated') # save intermediate file(s) - parser.add_argument('--save_intermediate', - action='store_true', - help='Save intermediate files to output folder') + parser.add_argument( + '--save_intermediate', + action='store_true', + help='Save intermediate files to output folder') # experimental options - parser.add_argument('--experimental_disable_batchmatmul_unfold', - action='store_true', - help='Experimental disable BatchMatMul unfold') + parser.add_argument( + '--experimental_disable_batchmatmul_unfold', + action='store_true', + help='Experimental disable BatchMatMul unfold') return parser diff --git a/compiler/one-cmds/one-import-tflite b/compiler/one-cmds/one-import-tflite index edd24f44500..8eba46dc542 100644 --- a/compiler/one-cmds/one-import-tflite +++ b/compiler/one-cmds/one-import-tflite @@ -44,14 +44,10 @@ def _get_parser(): tflite2circle_group = parser.add_argument_group('converter arguments') # input and output path. - tflite2circle_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - tflite2circle_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + tflite2circle_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + tflite2circle_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') return parser diff --git a/compiler/one-cmds/one-infer b/compiler/one-cmds/one-infer index 9c2dfddb2fb..075e2bfa202 100644 --- a/compiler/one-cmds/one-infer +++ b/compiler/one-cmds/one-infer @@ -41,10 +41,11 @@ one-infer provides post-processing after invoking backend inference driver use python script and its arguments to '--post-process' argument as below one-infer -d dummy-infer --post-process "script.py arg1 arg2" -- [arguments for dummy-infer] """ - parser = argparse.ArgumentParser(description='command line tool to infer model', - usage=infer_usage, - epilog=infer_detail, - formatter_class=argparse.RawTextHelpFormatter) + parser = argparse.ArgumentParser( + description='command line tool to infer model', + usage=infer_usage, + epilog=infer_detail, + formatter_class=argparse.RawTextHelpFormatter) oneutils.add_default_arg(parser) diff --git a/compiler/one-cmds/one-init b/compiler/one-cmds/one-init index 2de1cde9220..63ce3dbfa5d 100644 --- a/compiler/one-cmds/one-init +++ b/compiler/one-cmds/one-init @@ -42,6 +42,7 @@ class InputOutputPath: After calling enter_new_section(), output path in section k will be used as input path of section k+1 ''' + def __init__(self, initial_input_path: str): self._first_step = True self._input_path = initial_input_path @@ -73,6 +74,7 @@ class CommentableConfigParser(configparser.ConfigParser): value is None. 
Ref: https://stackoverflow.com/questions/6620637/writing-comments-to-files-with-configparser """ + def __init__(self): # allow_no_value=True to add comment # ref: https://stackoverflow.com/a/19432072 @@ -114,20 +116,17 @@ def _get_parser(backends_list): oneutils.add_default_arg_no_CS(parser) - parser.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input model file') - parser.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output cfg file') - parser.add_argument('-m', - '--model_type', - type=str, - help=('type of input model: "onnx", "tflite". ' - 'If the file extension passed to --input_path is ' - '".tflite" or ".onnx", this arg can be omitted.')) + parser.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input model file') + parser.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output cfg file') + parser.add_argument( + '-m', + '--model_type', + type=str, + help=('type of input model: "onnx", "tflite". ' + 'If the file extension passed to --input_path is ' + '".tflite" or ".onnx", this arg can be omitted.')) onnx_group = parser.add_argument_group('arguments when model type is onnx') onnx_group.add_argument( diff --git a/compiler/one-cmds/one-optimize b/compiler/one-cmds/one-optimize index c60561f9990..51668a81611 100644 --- a/compiler/one-cmds/one-optimize +++ b/compiler/one-cmds/one-optimize @@ -40,10 +40,11 @@ def _get_parser(): ## utility arguments utility_group = parser.add_argument_group('arguments for utility') - utility_group.add_argument('-p', - '--generate_profile_data', - action='store_true', - help='generate profiling data') + utility_group.add_argument( + '-p', + '--generate_profile_data', + action='store_true', + help='generate profiling data') utility_group.add_argument( '--change_outputs', @@ -54,14 +55,10 @@ def _get_parser(): circle2circle_group = parser.add_argument_group('arguments for optimization') # input and output path. - circle2circle_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - circle2circle_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + circle2circle_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + circle2circle_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') # optimization pass for opt in _constant.CONSTANT.OPTIMIZATION_OPTS: diff --git a/compiler/one-cmds/one-pack b/compiler/one-cmds/one-pack index 5bddec99b69..db42466206c 100644 --- a/compiler/one-cmds/one-pack +++ b/compiler/one-cmds/one-pack @@ -39,14 +39,10 @@ def _get_parser(): model2nnpkg_group = parser.add_argument_group('arguments for packaging') # input and output path. 
- model2nnpkg_group.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input file') - model2nnpkg_group.add_argument('-o', - '--output_path', - type=str, - help='full filepath of the output file') + model2nnpkg_group.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input file') + model2nnpkg_group.add_argument( + '-o', '--output_path', type=str, help='full filepath of the output file') return parser diff --git a/compiler/one-cmds/one-partition b/compiler/one-cmds/one-partition index 728a5064b05..62ab13d3912 100644 --- a/compiler/one-cmds/one-partition +++ b/compiler/one-cmds/one-partition @@ -36,14 +36,12 @@ def _get_parser(): oneutils.add_default_arg(parser) - parser.add_argument('--backends', - type=str, - help='backends in CSV to use for partitioning') + parser.add_argument( + '--backends', type=str, help='backends in CSV to use for partitioning') parser.add_argument('--default', type=str, help='default backend to assign') - parser.add_argument('--part_file', - type=str, - help='partition file which provides backend to assign') + parser.add_argument( + '--part_file', type=str, help='partition file which provides backend to assign') parser.add_argument('--input_file', type=str, help='input circle model filename') parser.add_argument( '--work_path', diff --git a/compiler/one-cmds/one-profile b/compiler/one-cmds/one-profile index 2477a350bf2..9296e519792 100644 --- a/compiler/one-cmds/one-profile +++ b/compiler/one-cmds/one-profile @@ -91,10 +91,8 @@ def _get_parser(backends_list): backends_name) + ')' backend_help_message = 'backend name to use ' + backends_name_message parser.add_argument('-b', '--backend', type=str, help=backend_help_message) - parser.add_argument('-T', - '--target', - type=str, - help='run with specific target of the backend') + parser.add_argument( + '-T', '--target', type=str, help='run with specific target of the backend') return parser @@ -253,9 +251,8 @@ def main(): # check if command schema exists # 1. if it exists, run the command according to the schema. # 2. if it doesn't exist, insert "--target ${TARGET}" at the beginning of the given command. - parser = oneutils.get_arg_parser(given_backend, - cmd="profile", - target=target_to_run) + parser = oneutils.get_arg_parser( + given_backend, cmd="profile", target=target_to_run) # [11], [13] if target_to_run and parser: if oneutils.is_valid_attr(cfg_args, 'command'): diff --git a/compiler/one-cmds/one-quantize b/compiler/one-cmds/one-quantize index ab77e7cd84f..a7dc59168b7 100644 --- a/compiler/one-cmds/one-quantize +++ b/compiler/one-cmds/one-quantize @@ -39,10 +39,8 @@ def _get_parser(): oneutils.add_default_arg(parser) # input and output path. - parser.add_argument('-i', - '--input_path', - type=str, - help='full filepath of the input circle model') + parser.add_argument( + '-i', '--input_path', type=str, help='full filepath of the input circle model') parser.add_argument( '-d', '--input_data', @@ -57,21 +55,24 @@ def _get_parser(): """file format of input data. 
h5/hdf5 (default), list/filelist (a text file where a file path of input data is written in each line),
or dir/directory (a directory where input data are saved)""")
-    parser.add_argument('-o',
-                        '--output_path',
-                        type=str,
-                        help='full filepath of the output quantized model')
+    parser.add_argument(
+        '-o',
+        '--output_path',
+        type=str,
+        help='full filepath of the output quantized model')
 
     # argument for profiling
-    parser.add_argument('-p',
-                        '--generate_profile_data',
-                        action='store_true',
-                        help='generate profiling data')
+    parser.add_argument(
+        '-p',
+        '--generate_profile_data',
+        action='store_true',
+        help='generate profiling data')
 
     # save intermediate file(s)
-    parser.add_argument('--save_intermediate',
-                        action='store_true',
-                        help='Save intermediate files to output folder')
+    parser.add_argument(
+        '--save_intermediate',
+        action='store_true',
+        help='Save intermediate files to output folder')
 
     ## arguments for quantization
     quantization_group = parser.add_argument_group('arguments for quantization')
@@ -150,18 +151,16 @@ def _get_parser():
         help="""Save min/max of each tensor.
 NOTE: Min/max values are clipped according to calibration algorithms,
 such as percentile or moving average. Nudge adjustment is not applied.""")
-    quantization_group.add_argument('--quant_config',
-                                    type=str,
-                                    help="Path to the quantization configuration file.")
+    quantization_group.add_argument(
+        '--quant_config', type=str, help="Path to the quantization configuration file.")
     quantization_group.add_argument(
         '--evaluate_result',
         action='store_true',
         help=
         """Evaluate accuracy of quantized model. Run inference for both fp32 model and the
 quantized model, and compare the inference results.""")
-    quantization_group.add_argument('--test_data',
-                                    type=str,
-                                    help="Path to the test data used for evaluation.")
+    quantization_group.add_argument(
+        '--test_data', type=str, help="Path to the test data used for evaluation.")
     quantization_group.add_argument(
         '--print_mae',
         action='store_true',
@@ -209,18 +208,12 @@ def _get_parser():
         help=
         'overwrite quantparam (scale, zero_point) to the specified tensor in the quantized model.'
) - force_quantparam_group.add_argument('--tensor_name', - type=str, - action='append', - help='tensor name (string)') - force_quantparam_group.add_argument('--scale', - type=float, - action='append', - help='scale (float)') - force_quantparam_group.add_argument('--zero_point', - type=int, - action='append', - help='zero point (int)') + force_quantparam_group.add_argument( + '--tensor_name', type=str, action='append', help='tensor name (string)') + force_quantparam_group.add_argument( + '--scale', type=float, action='append', help='scale (float)') + force_quantparam_group.add_argument( + '--zero_point', type=int, action='append', help='zero point (int)') # arguments for copy_quantparam option copy_quantparam_group = parser.add_argument_group( @@ -230,14 +223,10 @@ def _get_parser(): '--copy_quantparam', action='store_true', help='copy quantparam (scale, zero_point) of a tensor to another tensor.') - copy_quantparam_group.add_argument('--src_tensor_name', - type=str, - action='append', - help='tensor name (string)') - copy_quantparam_group.add_argument('--dst_tensor_name', - type=str, - action='append', - help='tensor name (string)') + copy_quantparam_group.add_argument( + '--src_tensor_name', type=str, action='append', help='tensor name (string)') + copy_quantparam_group.add_argument( + '--dst_tensor_name', type=str, action='append', help='tensor name (string)') # arguments for fake_quant option fake_quant_group = parser.add_argument_group('arguments for fake_quantize option') @@ -259,23 +248,19 @@ def _get_parser(): # arguments for ampq option ampq_quant_group = parser.add_argument_group('arguments for ampq option') # ampq - ampq_quant_group.add_argument('--ampq', - action='store_true', - help='quantize model using ampq solver.') + ampq_quant_group.add_argument( + '--ampq', action='store_true', help='quantize model using ampq solver.') # ampq_qerror_ratio - ampq_quant_group.add_argument('--ampq_qerror_ratio', - type=str, - help='quantization error ratio ([0, 1])') + ampq_quant_group.add_argument( + '--ampq_qerror_ratio', type=str, help='quantization error ratio ([0, 1])') # ampq_algorithm - ampq_quant_group.add_argument('--ampq_algorithm', - type=str, - help='type of algorithm (bisection, pattern)') + ampq_quant_group.add_argument( + '--ampq_algorithm', type=str, help='type of algorithm (bisection, pattern)') - ampq_quant_group.add_argument('--bisection_type', - type=str, - help="one of 'auto', 'i16_front', 'i16_back'") + ampq_quant_group.add_argument( + '--bisection_type', type=str, help="one of 'auto', 'i16_front', 'i16_back'") ampq_quant_group.add_argument( '--u8_layernorm_with_s16_variance', @@ -288,9 +273,10 @@ def _get_parser(): help='Use int16 for computing Sub and Exp nodes in uint8 Softmax') # ampq_bisection_visq - ampq_quant_group.add_argument('--ampq_bisection_visq', - type=str, - help='.visq.json file path with quantization errors') + ampq_quant_group.add_argument( + '--ampq_bisection_visq', + type=str, + help='.visq.json file path with quantization errors') return parser @@ -761,9 +747,8 @@ def _ampq_solve(args): f.write((' '.join(circle_quantizer_cmd) + '\n').encode()) # run circle-quantizer - oneutils.run(circle_quantizer_cmd, - err_prefix="circle_quantizer", - logfile=f) + oneutils.run( + circle_quantizer_cmd, err_prefix="circle_quantizer", logfile=f) # compute visq file visq_path = os.path.join(dir_path, 'visq') diff --git a/compiler/one-cmds/onecc b/compiler/one-cmds/onecc index 6deaa444115..dbbd4d0f680 100644 --- a/compiler/one-cmds/onecc +++ b/compiler/one-cmds/onecc 
@@ -73,16 +73,11 @@ def get_parser(): opt_help_message = 'optimization name to use ' + opt_help_message parser.add_argument('-O', type=str, metavar='OPTIMIZATION', help=opt_help_message) - parser.add_argument('-W', - '--workflow', - type=str, - metavar='WORKFLOW', - help='run with workflow file') - - parser.add_argument('-b', - '--backend', - type=str, - help='generate code for given backend') + parser.add_argument( + '-W', '--workflow', type=str, metavar='WORKFLOW', help='run with workflow file') + + parser.add_argument( + '-b', '--backend', type=str, help='generate code for given backend') target_name_list: List[str] = oneutils.get_target_list(get_name=True) if not target_name_list: diff --git a/compiler/one-cmds/onelib/WorkflowRunner.py b/compiler/one-cmds/onelib/WorkflowRunner.py index f7ec9f5a464..52bd253ff5a 100644 --- a/compiler/one-cmds/onelib/WorkflowRunner.py +++ b/compiler/one-cmds/onelib/WorkflowRunner.py @@ -88,14 +88,12 @@ def _verify_workflow(self, json_contents): # each workflow should have either WORKFLOW_STEPS_K or CFG_REFERENCE_K for workflow_k in workflows: - if not self.WORKFLOW_STEPS_K in json_contents[ - workflow_k] and not self.CFG_REFERENCE_K in json_contents[workflow_k]: + if not self.WORKFLOW_STEPS_K in json_contents[workflow_k] and not self.CFG_REFERENCE_K in json_contents[workflow_k]: raise ValueError("Each workflow should have either \"" + self.WORKFLOW_STEPS_K + "\" or \"" + self.CFG_REFERENCE_K + "\"") for workflow_k in workflows: - if self.WORKFLOW_STEPS_K in json_contents[ - workflow_k] and self.CFG_REFERENCE_K in json_contents[workflow_k]: + if self.WORKFLOW_STEPS_K in json_contents[workflow_k] and self.CFG_REFERENCE_K in json_contents[workflow_k]: raise ValueError("\"" + self.WORKFLOW_STEPS_K + "\" and \"" + self.CFG_REFERENCE_K + "\" are exclusive key") diff --git a/compiler/one-cmds/onelib/argumentparse.py b/compiler/one-cmds/onelib/argumentparse.py index 7b266cf5955..f140061d02a 100644 --- a/compiler/one-cmds/onelib/argumentparse.py +++ b/compiler/one-cmds/onelib/argumentparse.py @@ -120,8 +120,8 @@ class ArgumentParser(): def __init__(self): # List[args, action type, data type, option type] - self._actions: List[Tuple[Tuple[str], Action, Union[Type[str], - Type[bool]]]] = list() + self._actions: List[Tuple[Tuple[str], Action, Union[Type[str], Type[ + bool]]]] = list() self.driver: str = None self.target: str = None diff --git a/compiler/one-cmds/onelib/backends.py b/compiler/one-cmds/onelib/backends.py index 4403a07cbb6..362c8e84872 100644 --- a/compiler/one-cmds/onelib/backends.py +++ b/compiler/one-cmds/onelib/backends.py @@ -93,8 +93,8 @@ def search_driver(driver): return driver_path # CASE 2: one/backends/**/bin/{driver} is found - for driver_path in glob.glob(dir_path + '/../../backends/**/bin/' + driver, - recursive=True): + for driver_path in glob.glob( + dir_path + '/../../backends/**/bin/' + driver, recursive=True): if os.path.isfile(driver_path) and os.access(driver_path, os.X_OK): return driver_path diff --git a/compiler/one-cmds/onelib/export_constant.py b/compiler/one-cmds/onelib/export_constant.py index e33dd9ec0ca..7a2de1e8a40 100644 --- a/compiler/one-cmds/onelib/export_constant.py +++ b/compiler/one-cmds/onelib/export_constant.py @@ -23,11 +23,8 @@ def main(): parser = argparse.ArgumentParser( description='Export CONSTANT value with given file format.') - parser.add_argument('-c', - '--constant', - type=str, - required=True, - help='Constant name to export') + parser.add_argument( + '-c', '--constant', type=str, required=True, 
help='Constant name to export')
     parser.add_argument(
         '-f',
         '--format',
@@ -41,11 +38,8 @@ def main():
         '--exclusive',
         action='store_true',
         help='Exports the rest of the options except for the given constant')
-    parser.add_argument('-o',
-                        '--output_path',
-                        type=str,
-                        required=True,
-                        help='Path to output')
+    parser.add_argument(
+        '-o', '--output_path', type=str, required=True, help='Path to output')
 
     args = parser.parse_args()
 
diff --git a/compiler/one-cmds/onelib/utils.py b/compiler/one-cmds/onelib/utils.py
index 6bb8aa03665..c7ef184a184 100644
--- a/compiler/one-cmds/onelib/utils.py
+++ b/compiler/one-cmds/onelib/utils.py
@@ -59,16 +59,18 @@ def one_cmd_list():
 
 def add_default_arg(parser):
     # version
-    parser.add_argument('-v',
-                        '--version',
-                        action='store_true',
-                        help='show program\'s version number and exit')
+    parser.add_argument(
+        '-v',
+        '--version',
+        action='store_true',
+        help='show program\'s version number and exit')
 
     # verbose
-    parser.add_argument('-V',
-                        '--verbose',
-                        action='store_true',
-                        help='output additional information to stdout or stderr')
+    parser.add_argument(
+        '-V',
+        '--verbose',
+        action='store_true',
+        help='output additional information to stdout or stderr')
 
     # configuration file
     parser.add_argument('-C', '--config', type=str, help='run with configuration file')
@@ -81,16 +83,18 @@ def add_default_arg_no_CS(parser):
     This adds -v -V args only (no -C nor -S)
     """
     # version
-    parser.add_argument('-v',
-                        '--version',
-                        action='store_true',
-                        help='show program\'s version number and exit')
+    parser.add_argument(
+        '-v',
+        '--version',
+        action='store_true',
+        help='show program\'s version number and exit')
 
     # verbose
-    parser.add_argument('-V',
-                        '--verbose',
-                        action='store_true',
-                        help='output additional information to stdout or stderr')
+    parser.add_argument(
+        '-V',
+        '--verbose',
+        action='store_true',
+        help='output additional information to stdout or stderr')
 
 
 def is_accumulated_arg(arg, driver):
diff --git a/compiler/one-cmds/onnx_legalizer.py b/compiler/one-cmds/onnx_legalizer.py
index 659944c3022..0141514b691 100755
--- a/compiler/one-cmds/onnx_legalizer.py
+++ b/compiler/one-cmds/onnx_legalizer.py
@@ -86,6 +86,7 @@ class _ModelTransformerHelper:
       in that case _base_name_idx["t_"] == 4.
       This attribute is used for unique tensor name generation.
     """
+
     def __init__(self, model):
         self._model = model
         self._nodes_to_delete = []
@@ -155,10 +156,8 @@ def make_split(self, input, split_sizes, axis):
         Returns:
             list: list of output tensor names
         """
-        return self.make_node('Split', [input],
-                              len(split_sizes),
-                              axis=axis,
-                              split=split_sizes)
+        return self.make_node(
+            'Split', [input], len(split_sizes), axis=axis, split=split_sizes)
 
     def make_concat(self, inputs, axis):
         """Create Concat operation and insert it in graph.
@@ -211,10 +210,8 @@ def make_gemm(self, A, B, C, trans_a=False, trans_b=False):
         Returns:
             str: output tensor name
         """
-        return self.make_node('Gemm', [A, B, C],
-                              1,
-                              transA=bool(trans_a),
-                              transB=bool(trans_b))[0]
+        return self.make_node(
+            'Gemm', [A, B, C], 1, transA=bool(trans_a), transB=bool(trans_b))[0]
 
     def make_add(self, a, b):
         """Creates Add operation and insert it in graph.
@@ -389,10 +386,8 @@ def _generate_one_direction_RNN(transformer, X, W, R, B, initial_h, clip, for i in range(first_iter, seq_length): state_tensor = transformer.make_gemm(X[i], W, B, trans_b=True) - state_tensor = transformer.make_gemm(previous_state_tensor, - R, - state_tensor, - trans_b=True) + state_tensor = transformer.make_gemm( + previous_state_tensor, R, state_tensor, trans_b=True) if clip != None: state_tensor = transformer.make_clip(state_tensor, min=-clip, max=clip) previous_state_tensor = transformer.make_act(state_tensor, activation_name) @@ -425,14 +420,13 @@ def _transform_unidirectional_RNN(transformer, original_node, x, tensor_infos, a r = transformer.make_squeeze(inputs[2], axes=[0]) if len(inputs) > 3 and inputs[3] != '': raw_bias_tensor = transformer.make_squeeze(inputs[3], axes=[0]) - splitted_bias_tensors = transformer.make_split(raw_bias_tensor, - split_sizes=[hidden_size] * 2, - axis=0) + splitted_bias_tensors = transformer.make_split( + raw_bias_tensor, split_sizes=[hidden_size] * 2, axis=0) b = transformer.make_add(splitted_bias_tensors[0], splitted_bias_tensors[1]) else: data_type = _dtype_to_np(tensor_infos[inputs[2]].dtype) - b = transformer.make_constant_tensor(np.zeros(hidden_size, dtype=data_type), - "zero_bias") + b = transformer.make_constant_tensor( + np.zeros(hidden_size, dtype=data_type), "zero_bias") if len(inputs) > 5 and inputs[5] != '': direction_dim = layout initial_h = transformer.make_squeeze(inputs[5], axes=[direction_dim]) @@ -451,8 +445,8 @@ def _transform_unidirectional_RNN(transformer, original_node, x, tensor_infos, a # use low-level interface to attach to existing tensors Y_h = outputs[1] - transformer.make_node('Unsqueeze', [state_tensors[-1]], [Y_h], - axes=[y_h_direction_dim]) + transformer.make_node( + 'Unsqueeze', [state_tensors[-1]], [Y_h], axes=[y_h_direction_dim]) Y = outputs[0] transformer.make_node('Concat', state_layout_tensors, [Y], axis=seq_length_dim) @@ -487,26 +481,24 @@ def _transform_bidirectional_RNN(transformer, original_node, x, tensor_infos, ac if len(inputs) > 3 and inputs[3] != '': raw_bias_tensors = transformer.make_split(inputs[3], split_sizes=[1, 1], axis=0) for d in range(2): - raw_bias_tensors_squeezed = transformer.make_squeeze(raw_bias_tensors[d], - axes=[0]) - splitted_bias_tensors = transformer.make_split(raw_bias_tensors_squeezed, - split_sizes=[hidden_size] * 2, - axis=0) + raw_bias_tensors_squeezed = transformer.make_squeeze( + raw_bias_tensors[d], axes=[0]) + splitted_bias_tensors = transformer.make_split( + raw_bias_tensors_squeezed, split_sizes=[hidden_size] * 2, axis=0) b += [ transformer.make_add(splitted_bias_tensors[0], splitted_bias_tensors[1]) ] else: data_type = _dtype_to_np(tensor_infos[inputs[2]].dtype) b = [ - transformer.make_constant_tensor(np.zeros(hidden_size, dtype=data_type), - "zero_bias") + transformer.make_constant_tensor( + np.zeros(hidden_size, dtype=data_type), "zero_bias") ] * 2 initial_h = [None, None] if len(inputs) > 5 and inputs[5] != '': direction_dim = layout - initial_h = transformer.make_split(inputs[5], - split_sizes=[1, 1], - axis=direction_dim) + initial_h = transformer.make_split( + inputs[5], split_sizes=[1, 1], axis=direction_dim) for d in range(2): initial_h[d] = transformer.make_squeeze(initial_h[d], axes=[direction_dim]) @@ -530,20 +522,20 @@ def _transform_bidirectional_RNN(transformer, original_node, x, tensor_infos, ac state_layout_tensors_b = transformer.make_unsqueeze( state_b, axes=[seq_length_dim, y_direction_dim]) state_layout_tensors += [ - 
transformer.make_concat([state_layout_tensors_f, state_layout_tensors_b], - axis=y_direction_dim) + transformer.make_concat( + [state_layout_tensors_f, state_layout_tensors_b], axis=y_direction_dim) ] - last_f_state_layout_tensor = transformer.make_unsqueeze(state_f_tensors[-1], - axes=[y_h_direction_dim]) - last_b_state_layout_tensor = transformer.make_unsqueeze(state_b_tensors[0], - axes=[y_h_direction_dim]) + last_f_state_layout_tensor = transformer.make_unsqueeze( + state_f_tensors[-1], axes=[y_h_direction_dim]) + last_b_state_layout_tensor = transformer.make_unsqueeze( + state_b_tensors[0], axes=[y_h_direction_dim]) # use low-level interface to attach to existing tensors Y_h = outputs[1] - transformer.make_node('Concat', - [last_f_state_layout_tensor, last_b_state_layout_tensor], [Y_h], - axis=y_h_direction_dim) + transformer.make_node( + 'Concat', [last_f_state_layout_tensor, last_b_state_layout_tensor], [Y_h], + axis=y_h_direction_dim) Y = outputs[0] transformer.make_node('Concat', state_layout_tensors, [Y], axis=seq_length_dim) @@ -597,9 +589,8 @@ def _legalize_RNN(transformer, tensor_infos, node): if hidden_size == 0: hidden_size = tensor_infos[inputs[2]].shape[2] - input_split_tensor = transformer.make_split(inputs[0], - split_sizes=[1] * seq_length, - axis=seq_length_dim) + input_split_tensor = transformer.make_split( + inputs[0], split_sizes=[1] * seq_length, axis=seq_length_dim) x = [] for i in range(len(input_split_tensor)): input_frame_tensor = input_split_tensor[i] @@ -677,9 +668,8 @@ def _generate_one_direction_LSTM(transformer, X, W, R, B, initial_h, initial_c, R = {'i': r_tensors[0], 'o': r_tensors[1], 'f': r_tensors[2], 'c': r_tensors[3]} if B is not None: - separate_b_tensors = transformer.make_split(B, - split_sizes=[hidden_size] * 8, - axis=0) + separate_b_tensors = transformer.make_split( + B, split_sizes=[hidden_size] * 8, axis=0) b_tensors = [] for i in range(4): b_tensors += [ @@ -687,8 +677,8 @@ def _generate_one_direction_LSTM(transformer, X, W, R, B, initial_h, initial_c, ] else: b_tensors = [ - transformer.make_constant_tensor(np.zeros( - (hidden_size), dtype=dtype), 'zero_b') + transformer.make_constant_tensor( + np.zeros((hidden_size), dtype=dtype), 'zero_b') ] * 4 B = {'i': b_tensors[0], 'o': b_tensors[1], 'f': b_tensors[2], 'c': b_tensors[3]} @@ -708,8 +698,8 @@ def _generate_one_direction_LSTM(transformer, X, W, R, B, initial_h, initial_c, p_tensors = transformer.make_split(P, split_sizes=[hidden_size] * 3, axis=0) P = {'i': p_tensors[0], 'o': p_tensors[1], 'f': p_tensors[2]} else: - zero = transformer.make_constant_tensor(np.zeros((hidden_size), dtype=dtype), - 'zero_peephole') + zero = transformer.make_constant_tensor( + np.zeros((hidden_size), dtype=dtype), 'zero_peephole') P = {'i': zero, 'o': zero, 'f': zero} for i in range(seq_length): @@ -824,8 +814,8 @@ def _transform_unidirectional_LSTM(transformer, original_node, x, tensor_infos, # use low-level interface to attach to existing tensors Y_h = outputs[1] - transformer.make_node('Unsqueeze', [state_h_tensors[-1]], [Y_h], - axes=[y_h_direction_dim]) + transformer.make_node( + 'Unsqueeze', [state_h_tensors[-1]], [Y_h], axes=[y_h_direction_dim]) Y_c = outputs[2] transformer.make_node('Unsqueeze', [state_c_tensor], [Y_c], axes=[y_h_direction_dim]) if direction == 'reverse': @@ -868,18 +858,16 @@ def _transform_bidirectional_LSTM(transformer, original_node, x, tensor_infos, initial_h = [None, None] if len(inputs) > 5 and inputs[5] != '': direction_dim = layout - initial_h = 
transformer.make_split(inputs[5], - split_sizes=[1, 1], - axis=direction_dim) + initial_h = transformer.make_split( + inputs[5], split_sizes=[1, 1], axis=direction_dim) for d in range(2): initial_h[d] = transformer.make_squeeze(initial_h[d], axes=[direction_dim]) initial_c = [None, None] if len(inputs) > 6 and inputs[6] != '': direction_dim = layout - initial_c = transformer.make_split(inputs[6], - split_sizes=[1, 1], - axis=direction_dim) + initial_c = transformer.make_split( + inputs[6], split_sizes=[1, 1], axis=direction_dim) for d in range(2): initial_c[d] = transformer.make_squeeze(initial_c[d], axes=[direction_dim]) @@ -921,19 +909,19 @@ def _transform_bidirectional_LSTM(transformer, original_node, x, tensor_infos, state_b_layout_tensors = transformer.make_unsqueeze( b_h_state, axes=[seq_length_dim, y_direction_dim]) state_layout_tensors += [ - transformer.make_concat([state_f_layout_tensors, state_b_layout_tensors], - axis=y_direction_dim) + transformer.make_concat( + [state_f_layout_tensors, state_b_layout_tensors], axis=y_direction_dim) ] - last_f_state_layout_tensor = transformer.make_unsqueeze(state_f_h_tensors[-1], - axes=[y_c_direction_dim]) - last_b_state_layout_tensor = transformer.make_unsqueeze(state_b_h_tensors[0], - axes=[y_c_direction_dim]) + last_f_state_layout_tensor = transformer.make_unsqueeze( + state_f_h_tensors[-1], axes=[y_c_direction_dim]) + last_b_state_layout_tensor = transformer.make_unsqueeze( + state_b_h_tensors[0], axes=[y_c_direction_dim]) Y_h = outputs[1] - transformer.make_node('Concat', - [last_f_state_layout_tensor, last_b_state_layout_tensor], [Y_h], - axis=y_c_direction_dim) + transformer.make_node( + 'Concat', [last_f_state_layout_tensor, last_b_state_layout_tensor], [Y_h], + axis=y_c_direction_dim) Y_f_c = transformer.make_unsqueeze(state_f_c_tensor, axes=[y_c_direction_dim]) Y_b_c = transformer.make_unsqueeze(state_b_c_tensor, axes=[y_c_direction_dim]) @@ -998,9 +986,8 @@ def _legalize_LSTM(transformer, tensor_infos, node): if hidden_size == 0: hidden_size = tensor_infos[inputs[2]].shape[2] - input_split_tensor = transformer.make_split(inputs[0], - split_sizes=[1] * seq_length, - axis=seq_length_dim) + input_split_tensor = transformer.make_split( + inputs[0], split_sizes=[1] * seq_length, axis=seq_length_dim) x = [] for i in range(len(input_split_tensor)): input_frame_tensor = input_split_tensor[i] diff --git a/compiler/one-cmds/tests/onnx_legalize_run_compare.py b/compiler/one-cmds/tests/onnx_legalize_run_compare.py index 2ab47bf0002..b6bcb73c6fb 100644 --- a/compiler/one-cmds/tests/onnx_legalize_run_compare.py +++ b/compiler/one-cmds/tests/onnx_legalize_run_compare.py @@ -58,9 +58,8 @@ def _run_model(model, inputs): options.intra_op_num_threads = 4 # NOTE set `providers` for https://github.com/microsoft/onnxruntime/issues/17631 providers = rt.get_available_providers() - session = rt.InferenceSession(model.SerializeToString(), - sess_options=options, - providers=providers) + session = rt.InferenceSession( + model.SerializeToString(), sess_options=options, providers=providers) outputs = session.run(output_names, inputs) return outputs diff --git a/compiler/one-cmds/validate-onnx2circle/validate_onnx2circle.py b/compiler/one-cmds/validate-onnx2circle/validate_onnx2circle.py index 74778cf866f..28217e4224b 100644 --- a/compiler/one-cmds/validate-onnx2circle/validate_onnx2circle.py +++ b/compiler/one-cmds/validate-onnx2circle/validate_onnx2circle.py @@ -70,9 +70,8 @@ def load(self): options.intra_op_num_threads = 4 # NOTE set `providers` for 
https://github.com/microsoft/onnxruntime/issues/17631 providers = ort.get_available_providers() - self.session = ort.InferenceSession(self.filepath, - sess_options=options, - providers=providers) + self.session = ort.InferenceSession( + self.filepath, sess_options=options, providers=providers) def feed_random_inputs(self): self.inputs = self.session.get_inputs() @@ -114,11 +113,13 @@ def get_outputs(self): # Execute luci interpreter print("Run luci-interpreter...") -process = subprocess.run([ - driver, circle_filepath, - str(onnx_runner.inputs_size), circle_filepath + ".input", circle_filepath + ".output" -], - check=True) +process = subprocess.run( + [ + driver, circle_filepath, + str(onnx_runner.inputs_size), circle_filepath + ".input", + circle_filepath + ".output" + ], + check=True) # Compare results rtolerance = 1e-03 diff --git a/compiler/onecc-docker/onecc-docker b/compiler/onecc-docker/onecc-docker index 627b5921a08..10c72f05c26 100644 --- a/compiler/onecc-docker/onecc-docker +++ b/compiler/onecc-docker/onecc-docker @@ -49,7 +49,8 @@ def _request_recent_version(token=None): # Return the latest version containing one-compiler-focal_{version}_amd64.deb in assets version = release_item["tag_name"] for asset in release_item["assets"]: - if bool(re.match(r'one-compiler-focal_\d+\.\d+\.\d_amd64.deb', + if bool( + re.match(r'one-compiler-focal_\d+\.\d+\.\d_amd64.deb', asset["name"])): return version @@ -58,10 +59,8 @@ def _request_recent_version(token=None): # 10 sec timeout is set based on github.com/Samsung/ONE/issues/11134 def _run(cmd, is_shell=False, timeout=10): - result = subprocess.Popen(cmd, - shell=is_shell, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) + result = subprocess.Popen( + cmd, shell=is_shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = result.communicate(timeout=timeout) stdout = stdout.decode('utf-8') @@ -86,8 +85,8 @@ def main(): onecc_docker_usage = 'onecc-docker [-h] [-t TOKEN] [COMMAND ]' onecc_docker_desc = 'Run onecc via docker' - parser = argparse.ArgumentParser(usage=onecc_docker_usage, - description=onecc_docker_desc) + parser = argparse.ArgumentParser( + usage=onecc_docker_usage, description=onecc_docker_desc) parser.add_argument( "-t", "--token", diff --git a/compiler/onnx-tools/onnx-dump.py b/compiler/onnx-tools/onnx-dump.py index eb868312b69..f4b08aa389b 100644 --- a/compiler/onnx-tools/onnx-dump.py +++ b/compiler/onnx-tools/onnx-dump.py @@ -29,8 +29,8 @@ def _data_type_str(data_type): def _get_attribute_value(attr): if attr.type == AttributeProto.TENSOR: - return "{}, {}".format(_data_type_str(attr.t.data_type), - numpy_helper.to_array(attr.t)) + return "{}, {}".format( + _data_type_str(attr.t.data_type), numpy_helper.to_array(attr.t)) if attr.type == AttributeProto.GRAPH: # TODO revise when graph node is available return "" diff --git a/compiler/pota-quantization-value-test/compare_tensors_all.py b/compiler/pota-quantization-value-test/compare_tensors_all.py index 78af4f43e06..98fe1d05950 100644 --- a/compiler/pota-quantization-value-test/compare_tensors_all.py +++ b/compiler/pota-quantization-value-test/compare_tensors_all.py @@ -20,14 +20,16 @@ help= 'All the param list to test. e.g. ${RECIPE_NAME_0} ${GRANULARITY_0} ${DTYPE_0} ${RECIPE_NAME_1} ${GRANULARITY_1} ${DTYPE_1}..' 
)
-parser.add_argument('--bin_dir',
-                    type=str,
-                    required=True,
-                    help='Directory path where test files are generated')
-parser.add_argument('--source_dir',
-                    type=str,
-                    required=True,
-                    help='Directory path where expected outputs exist')
+parser.add_argument(
+    '--bin_dir',
+    type=str,
+    required=True,
+    help='Directory path where test files are generated')
+parser.add_argument(
+    '--source_dir',
+    type=str,
+    required=True,
+    help='Directory path where expected outputs exist')
 parser.add_argument('--mode', type=str, required=True, help='Mode to test')
 
 args = parser.parse_args()
 
@@ -115,12 +117,11 @@ def compare_quantization(tensor, tensor_name, expect_dir):
             if tensor["weights"].dtype == 'int64':
                 abs_tolerance = 5
 
-            if np.allclose(input_weights, expected_weights, rtol=0,
-                           atol=abs_tolerance) == False:
-                logging.error("Quantized weights of " + tensor_name + " (" +
-                              str(input_weights) +
-                              ") do not match with expected value (" +
-                              str(expected_weights) + ").")
+            if np.allclose(
+                    input_weights, expected_weights, rtol=0, atol=abs_tolerance) == False:
+                logging.error(
+                    "Quantized weights of " + tensor_name + " (" + str(input_weights) +
+                    ") do not match with expected value (" + str(expected_weights) + ").")
                 test_result = False
 
         if key == "scale":
@@ -135,8 +136,8 @@ def compare_quantization(tensor, tensor_name, expect_dir):
         if key == "zero_point":
             expected_zero_point = np.array(json_load["zero_point"])
             input_zero_point = tensor["zero_point"][:]
-            if np.allclose(input_zero_point, expected_zero_point, rtol=0,
-                           atol=1) == False:
+            if np.allclose(
+                    input_zero_point, expected_zero_point, rtol=0, atol=1) == False:
                 logging.error("Quantized zero_point of " + tensor_name + " (" +
                               str(input_zero_point) +
                               ") do not match with expected value (" +
diff --git a/compiler/pota-quantization-value-test/gen_h5_explicit_inputs_all.py b/compiler/pota-quantization-value-test/gen_h5_explicit_inputs_all.py
index 707f823b27f..c14a54e4e32 100644
--- a/compiler/pota-quantization-value-test/gen_h5_explicit_inputs_all.py
+++ b/compiler/pota-quantization-value-test/gen_h5_explicit_inputs_all.py
@@ -37,18 +37,21 @@ def toNumpyType(circle_type):
 # 4, 5, 6
 #
 parser = argparse.ArgumentParser()
-parser.add_argument('--output_dir',
-                    type=str,
-                    required=True,
-                    help='Output directory where the inputs are generated')
-parser.add_argument('--artifact_dir',
-                    type=str,
-                    required=True,
-                    help='Artifact directory where test files exist')
-parser.add_argument('--input_dir',
-                    type=str,
-                    required=True,
-                    help='Input directory where input text files exist')
+parser.add_argument(
+    '--output_dir',
+    type=str,
+    required=True,
+    help='Output directory where the inputs are generated')
+parser.add_argument(
+    '--artifact_dir',
+    type=str,
+    required=True,
+    help='Artifact directory where test files exist')
+parser.add_argument(
+    '--input_dir',
+    type=str,
+    required=True,
+    help='Input directory where input text files exist')
 parser.add_argument(
     '--test_param',
     type=str,
diff --git a/compiler/q-implant-qparam-test/q_implant_validator.py b/compiler/q-implant-qparam-test/q_implant_validator.py
index d544cf34e74..1f3d8b608c9 100644
--- a/compiler/q-implant-qparam-test/q_implant_validator.py
+++ b/compiler/q-implant-qparam-test/q_implant_validator.py
@@ -34,8 +34,9 @@ def validate(h5_path, qparam_dir, qparam_json):
         if tensor_name == "value":
             expected_weights = np.load(np_path)
             h5_weights = model[node_name]["weights"][:]
-            if np.allclose(h5_weights, expected_weights, rtol=1.e-5,
-                           atol=1.e-5) == False:
+            if np.allclose(
+                    h5_weights,
expected_weights, rtol=1.e-5, + atol=1.e-5) == False: print("Implanted weights of " + node_name + "." + tensor_name + " (" + str(h5_weights) + ") do not match with expected value (" + @@ -45,8 +46,8 @@ def validate(h5_path, qparam_dir, qparam_json): if tensor_name == "scale": expected_scale = np.load(np_path) h5_scale = model[node_name]["scale"][:] - if np.allclose(h5_scale, expected_scale, rtol=1.e-5, - atol=1.e-5) == False: + if np.allclose( + h5_scale, expected_scale, rtol=1.e-5, atol=1.e-5) == False: print("Implanted scale of " + node_name + "." + tensor_name + " (" + str(h5_scale) + ") do not match with expected value (" + diff --git a/compiler/record-minmax-thread-safety-test/gen_h5_random_inputs.py b/compiler/record-minmax-thread-safety-test/gen_h5_random_inputs.py index 5c72b6f7779..d57289abf1f 100644 --- a/compiler/record-minmax-thread-safety-test/gen_h5_random_inputs.py +++ b/compiler/record-minmax-thread-safety-test/gen_h5_random_inputs.py @@ -42,8 +42,9 @@ print(input_detail["dtype"]) if input_detail["dtype"] == np.bool_: # Generate random bool [0, 1] - input_data = np.array(np.random.random_integers(0, 1, input_detail["shape"]), - input_detail["dtype"]) + input_data = np.array( + np.random.random_integers(0, 1, input_detail["shape"]), + input_detail["dtype"]) elif input_detail["dtype"] == np.float32: # Generate random input [-5, 5) input_data = np.array(10 * np.random.random_sample(input_detail["shape"]) - 5, diff --git a/compiler/tf2tfliteV2/tf2tfliteV2.py b/compiler/tf2tfliteV2/tf2tfliteV2.py index e79c6ccb97a..2bcf553289d 100755 --- a/compiler/tf2tfliteV2/tf2tfliteV2.py +++ b/compiler/tf2tfliteV2/tf2tfliteV2.py @@ -43,55 +43,60 @@ def _get_parser(): description=("Command line tool to run TensorFlow Lite Converter.")) # Verbose - parser.add_argument("-V", - "--verbose", - action="store_true", - help="output additional information to stdout or stderr") + parser.add_argument( + "-V", + "--verbose", + action="store_true", + help="output additional information to stdout or stderr") # Converter version. converter_version = parser.add_mutually_exclusive_group(required=True) - converter_version.add_argument("--v1", - action="store_true", - help="Use TensorFlow Lite Converter 1.x") - converter_version.add_argument("--v2", - action="store_true", - help="Use TensorFlow Lite Converter 2.x") + converter_version.add_argument( + "--v1", action="store_true", help="Use TensorFlow Lite Converter 1.x") + converter_version.add_argument( + "--v2", action="store_true", help="Use TensorFlow Lite Converter 2.x") # Input model format model_format_arg = parser.add_mutually_exclusive_group() - model_format_arg.add_argument("--graph_def", - action="store_const", - dest="model_format", - const="graph_def", - help="Use graph def file(default)") - model_format_arg.add_argument("--saved_model", - action="store_const", - dest="model_format", - const="saved_model", - help="Use saved model") - model_format_arg.add_argument("--keras_model", - action="store_const", - dest="model_format", - const="keras_model", - help="Use keras model") + model_format_arg.add_argument( + "--graph_def", + action="store_const", + dest="model_format", + const="graph_def", + help="Use graph def file(default)") + model_format_arg.add_argument( + "--saved_model", + action="store_const", + dest="model_format", + const="saved_model", + help="Use saved model") + model_format_arg.add_argument( + "--keras_model", + action="store_const", + dest="model_format", + const="keras_model", + help="Use keras model") # Input and output path. 
- parser.add_argument("-i", - "--input_path", - type=str, - help="Full filepath of the input file.", - required=True) - parser.add_argument("-o", - "--output_path", - type=str, - help="Full filepath of the output file.", - required=True) + parser.add_argument( + "-i", + "--input_path", + type=str, + help="Full filepath of the input file.", + required=True) + parser.add_argument( + "-o", + "--output_path", + type=str, + help="Full filepath of the output file.", + required=True) # Input and output arrays. - parser.add_argument("-I", - "--input_arrays", - type=str, - help="Names of the input arrays, comma-separated.") + parser.add_argument( + "-I", + "--input_arrays", + type=str, + help="Names of the input arrays, comma-separated.") parser.add_argument( "-s", "--input_shapes", @@ -99,15 +104,17 @@ def _get_parser(): help= "Shapes corresponding to --input_arrays, colon-separated.(ex:\"1,4,4,3:1,20,20,3\")" ) - parser.add_argument("-O", - "--output_arrays", - type=str, - help="Names of the output arrays, comma-separated.") + parser.add_argument( + "-O", + "--output_arrays", + type=str, + help="Names of the output arrays, comma-separated.") # experimental options - parser.add_argument("--experimental_disable_batchmatmul_unfold", - action="store_true", - help="Experimental disable BatchMatMul unfold") + parser.add_argument( + "--experimental_disable_batchmatmul_unfold", + action="store_true", + help="Experimental disable BatchMatMul unfold") # Set default value parser.set_defaults(model_format="graph_def") diff --git a/compiler/visq-unittest/test/testPalette.py b/compiler/visq-unittest/test/testPalette.py index c72f22a914a..bf5fbb42e1e 100644 --- a/compiler/visq-unittest/test/testPalette.py +++ b/compiler/visq-unittest/test/testPalette.py @@ -34,10 +34,8 @@ def test_ylorrd9_wrong_minmax(self): for min_val, max_val in zip(min_test, max_test): # min must be less than max - self.assertRaises(RuntimeError, - YLORRD9Palette, - qerror_min=min_val, - qerror_max=max_val) + self.assertRaises( + RuntimeError, YLORRD9Palette, qerror_min=min_val, qerror_max=max_val) if __name__ == '__main__': diff --git a/compiler/visq/visq b/compiler/visq/visq index 649c87ed18c..6c3b94d11b8 100644 --- a/compiler/visq/visq +++ b/compiler/visq/visq @@ -42,16 +42,18 @@ from visqlib.DotBuilder import DotBuilder def _get_parser(): parser = argparse.ArgumentParser( description='Command line tool to visualize layer-wise quantization errors') - parser.add_argument("-f", - "--fp32_circle", - type=str, - help="Path to the fp32 circle model.", - required=True) - parser.add_argument("-q", - "--q_circle", - type=str, - help="Path to the quantized circle model.", - required=True) + parser.add_argument( + "-f", + "--fp32_circle", + type=str, + help="Path to the fp32 circle model.", + required=True) + parser.add_argument( + "-q", + "--q_circle", + type=str, + help="Path to the quantized circle model.", + required=True) parser.add_argument( "-d", "--data", @@ -59,31 +61,34 @@ def _get_parser(): help= "Path to the data used for inference. 
Random data will be used if this option is not given.", required=False) - parser.add_argument("--mpeir_output", - type=str, - help="Path to the output json file (qerror metric = MPEIR).", - required=False) - parser.add_argument("--mse_output", - type=str, - help="Path to the output json file (qerror metric = MSE).", - required=False) - parser.add_argument("--tae_output", - type=str, - help="Path to the output json file (qerror metric = TAE).", - required=False) - parser.add_argument("--srmse_output", - type=str, - help="Path to the output json file (qerror metric = SRMSE).", - required=False) - parser.add_argument("--dump_dot_graph", - action="store_true", - help="Dump dot graph.", - required=False) - parser.add_argument("-b", - "--batch_size", - type=int, - help="Batch size to process large datasets.", - required=False) + parser.add_argument( + "--mpeir_output", + type=str, + help="Path to the output json file (qerror metric = MPEIR).", + required=False) + parser.add_argument( + "--mse_output", + type=str, + help="Path to the output json file (qerror metric = MSE).", + required=False) + parser.add_argument( + "--tae_output", + type=str, + help="Path to the output json file (qerror metric = TAE).", + required=False) + parser.add_argument( + "--srmse_output", + type=str, + help="Path to the output json file (qerror metric = SRMSE).", + required=False) + parser.add_argument( + "--dump_dot_graph", action="store_true", help="Dump dot graph.", required=False) + parser.add_argument( + "-b", + "--batch_size", + type=int, + help="Batch size to process large datasets.", + required=False) return parser @@ -243,10 +248,8 @@ def _save_dot(circle_path: str, dot_path: str, metric: str, colors: list, qerror # metric: Metric name (ex: MPEIR, MSE) # colors: list [{'b': begin, 'e': end, 'c':color}, ..] 
# qerror: dict {tensor_name (str) -> qerror (float)} - builder = DotBuilder(circle_path=circle_path, - dot_path=dot_path, - metric=metric, - colors=colors) + builder = DotBuilder( + circle_path=circle_path, dot_path=dot_path, metric=metric, colors=colors) builder.save(qerror) @@ -270,19 +273,21 @@ def run_on_data_batchwise(fp32_model, q_model, data, dump_dot_graph, computers, qerror_map, q_min, q_max = cur_computer.get_final_result() palette = YLORRD9Palette(qerror_min=q_min, qerror_max=q_max) - result = _build_json(metric=metric_key, - model=Path(fp32_model).name, - colorscheme=palette.colorscheme(), - error=qerror_map) + result = _build_json( + metric=metric_key, + model=Path(fp32_model).name, + colorscheme=palette.colorscheme(), + error=qerror_map) with open(output, "w") as f: json.dump(result, f) if dump_dot_graph: - _save_dot(circle_path=fp32_model, - dot_path=output + '.dot', - metric=metric_key, - colors=palette.colorscheme(), - qerror=qerror_map) + _save_dot( + circle_path=fp32_model, + dot_path=output + '.dot', + metric=metric_key, + colors=palette.colorscheme(), + qerror=qerror_map) def run_on_data(fp32_model, q_model, data, dump_dot_graph, computers): @@ -315,19 +320,21 @@ def run_on_data(fp32_model, q_model, data, dump_dot_graph, computers): qerror_map, q_min, q_max = cur_computer.get_final_result() palette = YLORRD9Palette(qerror_min=q_min, qerror_max=q_max) - result = _build_json(metric=metric_key, - model=Path(fp32_model).name, - colorscheme=palette.colorscheme(), - error=qerror_map) + result = _build_json( + metric=metric_key, + model=Path(fp32_model).name, + colorscheme=palette.colorscheme(), + error=qerror_map) with open(output, "w") as f: json.dump(result, f) if dump_dot_graph: - _save_dot(circle_path=fp32_model, - dot_path=output + '.dot', - metric=metric_key, - colors=palette.colorscheme(), - qerror=qerror_map) + _save_dot( + circle_path=fp32_model, + dot_path=output + '.dot', + metric=metric_key, + colors=palette.colorscheme(), + qerror=qerror_map) def main(): diff --git a/compiler/visq/visqlib/DotBuilder.py b/compiler/visq/visqlib/DotBuilder.py index bbb5007730b..a6afb966ca6 100644 --- a/compiler/visq/visqlib/DotBuilder.py +++ b/compiler/visq/visqlib/DotBuilder.py @@ -128,10 +128,11 @@ def save(self, qerror_map: dict): if op_name in qerror_map: qerror = qerror_map[op_name] - node = pydot.Node(_quote(op_name), - style="filled", - fillcolor=self._get_color(qerror), - xlabel=self._metric + ": {:.4f}".format(qerror)) + node = pydot.Node( + _quote(op_name), + style="filled", + fillcolor=self._get_color(qerror), + xlabel=self._metric + ": {:.4f}".format(qerror)) else: # qerror_map does not have qerror info for the op. Color gray. # When this happen? visq does not collect qerror info of some Ops diff --git a/infra/nnfw/python/setup.py b/infra/nnfw/python/setup.py index 8927fedc90e..e3c5baf12d9 100644 --- a/infra/nnfw/python/setup.py +++ b/infra/nnfw/python/setup.py @@ -75,9 +75,8 @@ def get_directories(): # If the environment variable is not set, get default one. 
product_dir = os.environ.get("PRODUCT_DIR", DEFAULT_PRODUCT_DIR) return os.path.join(THIS_FILE_DIR, product_dir), os.path.join( - product_dir, - "lib/" if product_dir != DEFAULT_PRODUCT_DIR else target_arch + - '-linux.release/out/lib') + product_dir, "lib/" if product_dir != DEFAULT_PRODUCT_DIR else + target_arch + '-linux.release/out/lib') product_dir, so_core_dir = get_directories() diff --git a/res/PyTorchExamples/examples/interpolate/__init__.py b/res/PyTorchExamples/examples/interpolate/__init__.py index 7eb5f1d34db..ba0da42f4e4 100644 --- a/res/PyTorchExamples/examples/interpolate/__init__.py +++ b/res/PyTorchExamples/examples/interpolate/__init__.py @@ -13,11 +13,12 @@ def __init__(self, scale_factor): self.scale_factor = scale_factor def forward(self, input): - return torch.nn.functional.interpolate(input, - scale_factor=self.scale_factor, - mode='bilinear', - align_corners=True, - recompute_scale_factor=True) + return torch.nn.functional.interpolate( + input, + scale_factor=self.scale_factor, + mode='bilinear', + align_corners=True, + recompute_scale_factor=True) def onnx_opset_version(self): return 11 diff --git a/res/PyTorchExamples/ptem.py b/res/PyTorchExamples/ptem.py index 445b1d6fd0b..6cc28b66a24 100755 --- a/res/PyTorchExamples/ptem.py +++ b/res/PyTorchExamples/ptem.py @@ -54,10 +54,8 @@ onnx_model_path = output_folder + example + ".onnx" - torch.onnx.export(module._model_, - module._dummy_, - onnx_model_path, - opset_version=opset_version) + torch.onnx.export( + module._model_, module._dummy_, onnx_model_path, opset_version=opset_version) print("Generate '" + example + ".onnx' - Done") onnx_model = onnx.load(onnx_model_path) diff --git a/res/TensorFlowPythonExamples/examples/conv2d_2/__init__.py b/res/TensorFlowPythonExamples/examples/conv2d_2/__init__.py index a0b87c37169..812fef12b53 100644 --- a/res/TensorFlowPythonExamples/examples/conv2d_2/__init__.py +++ b/res/TensorFlowPythonExamples/examples/conv2d_2/__init__.py @@ -9,9 +9,5 @@ strides = (1, 2, 2, 1) dilations = (1, 2, 2, 1) -op_ = tf.compat.v1.nn.conv2d(in_, - filters, - strides, - "VALID", - data_format="NHWC", - dilations=dilations) +op_ = tf.compat.v1.nn.conv2d( + in_, filters, strides, "VALID", data_format="NHWC", dilations=dilations) diff --git a/res/TensorFlowPythonExamples/examples/conv2d_transpose/__init__.py b/res/TensorFlowPythonExamples/examples/conv2d_transpose/__init__.py index e7ff0325fc4..cd317cee949 100644 --- a/res/TensorFlowPythonExamples/examples/conv2d_transpose/__init__.py +++ b/res/TensorFlowPythonExamples/examples/conv2d_transpose/__init__.py @@ -4,8 +4,5 @@ input_ = tf.compat.v1.placeholder(tf.float32, shape=(1, 8, 8, 1), name="Hole") kernel_ = tf.compat.v1.placeholder(tf.float32, shape=(3, 3, 1, 1), name="Hole") -op_ = tf.compat.v1.nn.conv2d_transpose(input_, - kernel_, - output_shape=[1, 8, 8, 1], - strides=[1, 1, 1, 1], - padding='SAME') +op_ = tf.compat.v1.nn.conv2d_transpose( + input_, kernel_, output_shape=[1, 8, 8, 1], strides=[1, 1, 1, 1], padding='SAME') diff --git a/res/TensorFlowPythonExamples/examples/depthwise_conv2d_1/__init__.py b/res/TensorFlowPythonExamples/examples/depthwise_conv2d_1/__init__.py index a1696f8f95c..a9c8b33eb9f 100644 --- a/res/TensorFlowPythonExamples/examples/depthwise_conv2d_1/__init__.py +++ b/res/TensorFlowPythonExamples/examples/depthwise_conv2d_1/__init__.py @@ -5,8 +5,8 @@ in_ = tf.compat.v1.placeholder(tf.float32, shape=(1, 32, 32, 4), name="Hole") -filters = np.array(np.random.uniform(low=-1., high=1, size=[3, 3, 4, 1]), - dtype=np.float32) 
+filters = np.array( + np.random.uniform(low=-1., high=1, size=[3, 3, 4, 1]), dtype=np.float32) strides = (1, 2, 2, 1) op_ = tf.compat.v1.nn.depthwise_conv2d(in_, filters, strides, "VALID", data_format="NHWC") diff --git a/res/TensorFlowPythonExamples/examples/depthwise_conv2d_2/__init__.py b/res/TensorFlowPythonExamples/examples/depthwise_conv2d_2/__init__.py index 7b4e9d9e3cb..8fbd0da4989 100644 --- a/res/TensorFlowPythonExamples/examples/depthwise_conv2d_2/__init__.py +++ b/res/TensorFlowPythonExamples/examples/depthwise_conv2d_2/__init__.py @@ -5,14 +5,10 @@ in_ = tf.compat.v1.placeholder(tf.float32, shape=(1, 32, 32, 4), name="Hole") -filters = np.array(np.random.uniform(low=-1., high=1, size=[3, 3, 4, 1]), - dtype=np.float32) +filters = np.array( + np.random.uniform(low=-1., high=1, size=[3, 3, 4, 1]), dtype=np.float32) strides = (1, 2, 2, 1) dilations = (2, 2) -op_ = tf.compat.v1.nn.depthwise_conv2d(in_, - filters, - strides, - "VALID", - data_format="NHWC", - dilations=dilations) +op_ = tf.compat.v1.nn.depthwise_conv2d( + in_, filters, strides, "VALID", data_format="NHWC", dilations=dilations) diff --git a/res/TensorFlowPythonExamples/examples/fused_batch_norm/__init__.py b/res/TensorFlowPythonExamples/examples/fused_batch_norm/__init__.py index 79796a5b436..628420c3bbd 100644 --- a/res/TensorFlowPythonExamples/examples/fused_batch_norm/__init__.py +++ b/res/TensorFlowPythonExamples/examples/fused_batch_norm/__init__.py @@ -8,9 +8,5 @@ variance = tf.constant([4., 5., 6.]) in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(3, 3, 3, 3), name="Hole") -fbn_ = tf.compat.v1.nn.fused_batch_norm(in_, - scale, - offset, - mean, - variance, - is_training=False) +fbn_ = tf.compat.v1.nn.fused_batch_norm( + in_, scale, offset, mean, variance, is_training=False) diff --git a/res/TensorFlowPythonExamples/examples/max_pool_with_argmax/__init__.py b/res/TensorFlowPythonExamples/examples/max_pool_with_argmax/__init__.py index c0aeed93b59..78daa034c87 100755 --- a/res/TensorFlowPythonExamples/examples/max_pool_with_argmax/__init__.py +++ b/res/TensorFlowPythonExamples/examples/max_pool_with_argmax/__init__.py @@ -3,7 +3,5 @@ tf.compat.v1.disable_eager_execution() in_ = tf.compat.v1.placeholder(dtype=tf.float32, shape=(1, 4, 4, 1), name="Hole") -op_ = tf.compat.v1.nn.max_pool_with_argmax(in_, - ksize=[1, 2, 2, 1], - strides=[1, 1, 1, 1], - padding="VALID") +op_ = tf.compat.v1.nn.max_pool_with_argmax( + in_, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding="VALID") diff --git a/res/TensorFlowPythonExamples/examples/one_hot/__init__.py b/res/TensorFlowPythonExamples/examples/one_hot/__init__.py index 27c1cb8077d..b99bb9ca0a9 100644 --- a/res/TensorFlowPythonExamples/examples/one_hot/__init__.py +++ b/res/TensorFlowPythonExamples/examples/one_hot/__init__.py @@ -6,8 +6,5 @@ depth_ = tf.compat.v1.placeholder(tf.int32, shape=(), name='Hole') on_value_ = tf.compat.v1.placeholder(tf.int32, shape=(), name='Hole') off_value_ = tf.compat.v1.placeholder(tf.int32, shape=(), name='Hole') -op_ = tf.one_hot(indices=indice_, - depth=depth_, - on_value=on_value_, - off_value=off_value_, - axis=-1) +op_ = tf.one_hot( + indices=indice_, depth=depth_, on_value=on_value_, off_value=off_value_, axis=-1) diff --git a/res/TensorFlowPythonExamples/examples/prelu/__init__.py b/res/TensorFlowPythonExamples/examples/prelu/__init__.py index 755d92c98b5..7e43f51014d 100644 --- a/res/TensorFlowPythonExamples/examples/prelu/__init__.py +++ b/res/TensorFlowPythonExamples/examples/prelu/__init__.py @@ -2,8 +2,7 @@ 
tf.compat.v1.disable_eager_execution()
 
-input_tensor = tf.compat.v1.placeholder(dtype=tf.float32,
-                                        name="input",
-                                        shape=[1, 4, 4, 3])
+input_tensor = tf.compat.v1.placeholder(
+    dtype=tf.float32, name="input", shape=[1, 4, 4, 3])
 prelu = tf.keras.layers.PReLU(shared_axes=[1, 2])
 op_ = prelu(input_tensor)
diff --git a/res/TensorFlowPythonExamples/examples/while_2/__init__.py b/res/TensorFlowPythonExamples/examples/while_2/__init__.py
index 48da1ab70aa..9e26639bfb7 100644
--- a/res/TensorFlowPythonExamples/examples/while_2/__init__.py
+++ b/res/TensorFlowPythonExamples/examples/while_2/__init__.py
@@ -9,10 +9,8 @@
 b = lambda i: tf.concat([i, x], axis=1)
 
 # this loop changes i's shape from [1, 0] -> [1, 1] -> [1, 2] -> ... -> [1, 10]
-r = tf.compat.v1.while_loop(c,
-                            b, [i],
-                            name="While",
-                            shape_invariants=[tf.TensorShape([1, None])])
+r = tf.compat.v1.while_loop(
+    c, b, [i], name="While", shape_invariants=[tf.TensorShape([1, None])])
 
 output = tf.compat.v1.identity(r, name="Output")
 
diff --git a/res/TensorFlowPythonExamples/examples/while_3/__init__.py b/res/TensorFlowPythonExamples/examples/while_3/__init__.py
index 6c926c5d862..30ce15a1e30 100644
--- a/res/TensorFlowPythonExamples/examples/while_3/__init__.py
+++ b/res/TensorFlowPythonExamples/examples/while_3/__init__.py
@@ -17,10 +17,8 @@ def b(ii):
 
 
 # this loop changes i's shape from [1, 0] -> [1, 1] -> [1, 2] -> ... -> [1, 10]
-r = tf.compat.v1.while_loop(c,
-                            b, [i],
-                            name="While",
-                            shape_invariants=[tf.TensorShape([1, None])])
+r = tf.compat.v1.while_loop(
+    c, b, [i], name="While", shape_invariants=[tf.TensorShape([1, None])])
 
 output = tf.compat.v1.identity(r, name="Output")
 
diff --git a/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py b/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py
index 110bcc4105c..ae034e8bf7f 100644
--- a/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py
+++ b/res/TensorFlowPythonModels/examples/tconv-bn/__init__.py
@@ -4,23 +4,21 @@
 input_ = tf.compat.v1.placeholder(tf.float32, shape=(1, 2, 2, 1), name="Hole")
 W = np.ones(9).reshape((3, 3, 1, 1))
 filter_ = tf.compat.v1.constant(W, dtype=tf.float32)
-tconv_ = tf.compat.v1.nn.conv2d_transpose(input_,
-                                          filter_,
-                                          output_shape=(1, 4, 4, 1),
-                                          strides=[1, 1, 1, 1],
-                                          padding='VALID')
+tconv_ = tf.compat.v1.nn.conv2d_transpose(
+    input_, filter_, output_shape=(1, 4, 4, 1), strides=[1, 1, 1, 1], padding='VALID')
 
 scale_ = tf.compat.v1.constant([1.0177339315414429], dtype=tf.float32)
 offset_ = tf.compat.v1.constant([0.015628524124622345], dtype=tf.float32)
 mean_ = tf.compat.v1.constant([1.027155211195349693], dtype=tf.float32)
 variance_ = tf.compat.v1.constant([0.25580066442489624], dtype=tf.float32)
 
-bn_out, _, _ = tf.compat.v1.nn.fused_batch_norm(tconv_,
-                                                scale_,
-                                                offset_,
-                                                mean=mean_,
-                                                variance=variance_,
-                                                epsilon=0.0010000000474974513,
-                                                is_training=False)
+bn_out, _, _ = tf.compat.v1.nn.fused_batch_norm(
+    tconv_,
+    scale_,
+    offset_,
+    mean=mean_,
+    variance=variance_,
+    epsilon=0.0010000000474974513,
+    is_training=False)
 '''
 python ../../compiler/tf2tfliteV2/tf2tfliteV2.py --v1 \
 -i tconv-bn.pbtxt \
diff --git a/res/TensorFlowTests/NET_0003/test.py b/res/TensorFlowTests/NET_0003/test.py
index 1ed2b3fe6bb..b5bad2daed8 100755
--- a/res/TensorFlowTests/NET_0003/test.py
+++ b/res/TensorFlowTests/NET_0003/test.py
@@ -9,11 +9,7 @@
 input0 = tf.placeholder(tf.float32, [1, 3, 3, 5])
 filter0 = tf.constant(1.0, shape=[2, 2, 5, 1])
 conv = tf.nn.conv2d(input0, filter=filter0, strides=[1, 1, 1, 1], padding='SAME')
-fbn =
tf.nn.fused_batch_norm(conv, - scale=[1.0], - offset=[0.0], - mean=[0.0], - variance=[1.0], - is_training=False) +fbn = tf.nn.fused_batch_norm( + conv, scale=[1.0], offset=[0.0], mean=[0.0], variance=[1.0], is_training=False) print(tf.get_default_graph().as_graph_def()) diff --git a/res/TensorFlowTests/NET_0004/test.py b/res/TensorFlowTests/NET_0004/test.py index dd1b1095c96..a0c790d7945 100755 --- a/res/TensorFlowTests/NET_0004/test.py +++ b/res/TensorFlowTests/NET_0004/test.py @@ -10,11 +10,7 @@ filter0 = tf.constant(1.0, shape=[2, 2, 5, 2]) dconv = tf.nn.depthwise_conv2d(input0, filter0, [1, 1, 1, 1], 'SAME') const = tf.constant(2.0, shape=[10]) -fbn = tf.nn.fused_batch_norm(x=dconv, - scale=const, - offset=const, - mean=const, - variance=const, - is_training=False) +fbn = tf.nn.fused_batch_norm( + x=dconv, scale=const, offset=const, mean=const, variance=const, is_training=False) print(tf.get_default_graph().as_graph_def()) diff --git a/runtime/onert/api/python/package/infer.py b/runtime/onert/api/python/package/infer.py index f3f95c63c52..146c4b03efc 100644 --- a/runtime/onert/api/python/package/infer.py +++ b/runtime/onert/api/python/package/infer.py @@ -15,6 +15,7 @@ def num_elems(tensor_info): class session(libnnfw_api_pybind.nnfw_session): """Class inherited nnfw_session for easily processing input/output""" + def __init__(self, nnpackage_path, backends="cpu"): super().__init__(nnpackage_path, backends) self.inputs = [] @@ -32,8 +33,8 @@ def set_inputs(self, size, inputs_array=[]): print( f"model's input size is {size} but given inputs_array size is {len(inputs_array)}.\n{i}-th index input is replaced by an array filled with 0." ) - input_array = np.zeros((num_elems(input_tensorinfo)), - dtype=input_tensorinfo.dtype) + input_array = np.zeros( + (num_elems(input_tensorinfo)), dtype=input_tensorinfo.dtype) self.set_input(i, input_array) self.inputs.append(input_array) @@ -42,8 +43,8 @@ def set_outputs(self, size): """Set outputs for each index""" for i in range(size): output_tensorinfo = self.output_tensorinfo(i) - output_array = np.zeros((num_elems(output_tensorinfo)), - dtype=output_tensorinfo.dtype) + output_array = np.zeros( + (num_elems(output_tensorinfo)), dtype=output_tensorinfo.dtype) self.set_output(i, output_array) self.outputs.append(output_array) diff --git a/tests/scripts/merge_result_of_benchmark_nnpkg.py b/tests/scripts/merge_result_of_benchmark_nnpkg.py index 426e9aaad99..7e69df0eb10 100755 --- a/tests/scripts/merge_result_of_benchmark_nnpkg.py +++ b/tests/scripts/merge_result_of_benchmark_nnpkg.py @@ -244,20 +244,16 @@ def main(): # Option use = "Usage: %prog [options] filename" parser = argparse.ArgumentParser(usage=use) - parser.add_argument("-i", - "--input_dir", - dest="input_dir", - default=".", - help="dir to have csv files") - parser.add_argument("-o", - "--output_dir", - dest="output_dir", - default=".", - help="dir to be moved csv files into") - parser.add_argument("-l", - "--model_list", - dest="model_list", - help="file to have model list") + parser.add_argument( + "-i", "--input_dir", dest="input_dir", default=".", help="dir to have csv files") + parser.add_argument( + "-o", + "--output_dir", + dest="output_dir", + default=".", + help="dir to be moved csv files into") + parser.add_argument( + "-l", "--model_list", dest="model_list", help="file to have model list") options = parser.parse_args() diff --git a/tools/circle_plus_gen/lib/json_parser.py b/tools/circle_plus_gen/lib/json_parser.py index 6e8a4082bc3..8c2c1ebd687 100644 --- 
a/tools/circle_plus_gen/lib/json_parser.py +++ b/tools/circle_plus_gen/lib/json_parser.py @@ -15,7 +15,7 @@ def to_camel_case(string: str): def _generate_optimizer( - opt_type: utils.OPTIM_OPTIONS_T, args: dict + opt_type: utils.OPTIM_OPTIONS_T, args: dict ) -> Tuple[ctr_gen.Optimizer, ctr_gen.OptimizerOptions, utils.OPTIM_OPTIONS_T]: options_t_str: str = opt_type.__name__ # e.g. SGDOptionsT @@ -38,7 +38,7 @@ def _generate_optimizer( def load_optimizer( - opt_obj: dict + opt_obj: dict ) -> Tuple[ctr_gen.Optimizer, ctr_gen.OptimizerOptions, utils.OPTIM_OPTIONS_T]: ''' Return objects for circle_traininfo_generated.ModelTrainingT.[optimizer, optimizerOptType, OptimizerOpt] diff --git a/tools/circle_plus_gen/main.py b/tools/circle_plus_gen/main.py index b20dbc98d83..82683ee278c 100644 --- a/tools/circle_plus_gen/main.py +++ b/tools/circle_plus_gen/main.py @@ -12,9 +12,8 @@ def get_cmd_args(): ) parser.add_argument('input', help='input circle file') - parser.add_argument('hyperparameters', - nargs='?', - help='training hyperparameters json file') + parser.add_argument( + 'hyperparameters', nargs='?', help='training hyperparameters json file') parser.add_argument( 'output', nargs='?', diff --git a/tools/circle_plus_gen/schema/circle_schema_generated.py b/tools/circle_plus_gen/schema/circle_schema_generated.py index 10a9cde1777..11809c08ed0 100644 --- a/tools/circle_plus_gen/schema/circle_schema_generated.py +++ b/tools/circle_plus_gen/schema/circle_schema_generated.py @@ -862,10 +862,8 @@ def GetRootAsCustomQuantization(cls, buf, offset=0): @classmethod def CustomQuantizationBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # CustomQuantization def Init(self, buf, pos): @@ -993,10 +991,8 @@ def GetRootAsQuantizationParameters(cls, buf, offset=0): @classmethod def QuantizationParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # QuantizationParameters def Init(self, buf, pos): @@ -1342,10 +1338,8 @@ def GetRootAsInt32Vector(cls, buf, offset=0): @classmethod def Int32VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Int32Vector def Init(self, buf, pos): @@ -1473,10 +1467,8 @@ def GetRootAsUint16Vector(cls, buf, offset=0): @classmethod def Uint16VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Uint16Vector def Init(self, buf, pos): @@ -1604,10 +1596,8 @@ def GetRootAsUint8Vector(cls, buf, offset=0): @classmethod def Uint8VectorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( 
+ buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Uint8Vector def Init(self, buf, pos): @@ -1735,10 +1725,8 @@ def GetRootAsDimensionMetadata(cls, buf, offset=0): @classmethod def DimensionMetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DimensionMetadata def Init(self, buf, pos): @@ -1910,10 +1898,8 @@ def GetRootAsSparsityParameters(cls, buf, offset=0): @classmethod def SparsityParametersBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SparsityParameters def Init(self, buf, pos): @@ -2151,10 +2137,8 @@ def GetRootAsVariantSubType(cls, buf, offset=0): @classmethod def VariantSubTypeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # VariantSubType def Init(self, buf, pos): @@ -2311,10 +2295,8 @@ def GetRootAsTensor(cls, buf, offset=0): @classmethod def TensorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Tensor def Init(self, buf, pos): @@ -2671,10 +2653,8 @@ def GetRootAsStablehloGatherOptions(cls, buf, offset=0): @classmethod def StablehloGatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloGatherOptions def Init(self, buf, pos): @@ -3006,10 +2986,8 @@ def StablehloTransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloTransposeOptions def Init(self, buf, pos): @@ -3141,10 +3119,8 @@ def StablehloDotGeneralOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloDotGeneralOptions def Init(self, buf, pos): @@ -3525,10 +3501,8 @@ def StablehloReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloReduceWindowOptions def Init(self, buf, pos): @@ -3904,10 +3878,8 @@ def GetRootAsStablehloWhileOptions(cls, buf, 
offset=0): @classmethod def StablehloWhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloWhileOptions def Init(self, buf, pos): @@ -4001,10 +3973,8 @@ def GetRootAsStablehloSortOptions(cls, buf, offset=0): @classmethod def StablehloSortOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloSortOptions def Init(self, buf, pos): @@ -4117,10 +4087,8 @@ def StablehloConcatenateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloConcatenateOptions def Init(self, buf, pos): @@ -4203,10 +4171,8 @@ def StablehloBroadcastInDimOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloBroadcastInDimOptions def Init(self, buf, pos): @@ -4339,10 +4305,8 @@ def GetRootAsStablehloCompareOptions(cls, buf, offset=0): @classmethod def StablehloCompareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloCompareOptions def Init(self, buf, pos): @@ -4439,10 +4403,8 @@ def StablehloDynamicSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloDynamicSliceOptions def Init(self, buf, pos): @@ -4571,10 +4533,8 @@ def GetRootAsStablehloPadOptions(cls, buf, offset=0): @classmethod def StablehloPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloPadOptions def Init(self, buf, pos): @@ -4817,10 +4777,8 @@ def GetRootAsStablehloIotaOptions(cls, buf, offset=0): @classmethod def StablehloIotaOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloIotaOptions def Init(self, buf, pos): @@ -4903,10 +4861,8 @@ def StablehloCustomCallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return 
flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloCustomCallOptions def Init(self, buf, pos): @@ -5161,10 +5117,8 @@ def GetRootAsStablehloReduceOptions(cls, buf, offset=0): @classmethod def StablehloReduceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloReduceOptions def Init(self, buf, pos): @@ -5306,10 +5260,8 @@ def GetRootAsStablehloSliceOptions(cls, buf, offset=0): @classmethod def StablehloSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloSliceOptions def Init(self, buf, pos): @@ -5554,10 +5506,8 @@ def StablehloConvolutionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloConvolutionOptions def Init(self, buf, pos): @@ -6284,10 +6234,8 @@ def GetRootAsStablehloScatterOptions(cls, buf, offset=0): @classmethod def StablehloScatterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloScatterOptions def Init(self, buf, pos): @@ -6601,10 +6549,8 @@ def StablehloRngBitGeneratorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StablehloRngBitGeneratorOptions def Init(self, buf, pos): @@ -6684,10 +6630,8 @@ def GetRootAsConv2DOptions(cls, buf, offset=0): @classmethod def Conv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Conv2DOptions def Init(self, buf, pos): @@ -6851,10 +6795,8 @@ def GetRootAsConv3DOptions(cls, buf, offset=0): @classmethod def Conv3DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Conv3DOptions def Init(self, buf, pos): @@ -7032,10 +6974,8 @@ def GetRootAsPool2DOptions(cls, buf, offset=0): @classmethod def Pool2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - 
size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Pool2DOptions def Init(self, buf, pos): @@ -7185,10 +7125,8 @@ def GetRootAsDepthwiseConv2DOptions(cls, buf, offset=0): @classmethod def DepthwiseConv2DOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DepthwiseConv2DOptions def Init(self, buf, pos): @@ -7353,10 +7291,8 @@ def GetRootAsConcatEmbeddingsOptions(cls, buf, offset=0): @classmethod def ConcatEmbeddingsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ConcatEmbeddingsOptions def Init(self, buf, pos): @@ -7563,10 +7499,8 @@ def GetRootAsLSHProjectionOptions(cls, buf, offset=0): @classmethod def LSHProjectionOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LSHProjectionOptions def Init(self, buf, pos): @@ -7646,10 +7580,8 @@ def GetRootAsSVDFOptions(cls, buf, offset=0): @classmethod def SVDFOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SVDFOptions def Init(self, buf, pos): @@ -7758,10 +7690,8 @@ def GetRootAsRNNOptions(cls, buf, offset=0): @classmethod def RNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # RNNOptions def Init(self, buf, pos): @@ -7856,10 +7786,8 @@ def GetRootAsSequenceRNNOptions(cls, buf, offset=0): @classmethod def SequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SequenceRNNOptions def Init(self, buf, pos): @@ -7974,10 +7902,8 @@ def BidirectionalSequenceRNNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BidirectionalSequenceRNNOptions def Init(self, buf, pos): @@ -8108,10 +8034,8 @@ def GetRootAsFullyConnectedOptions(cls, buf, offset=0): @classmethod def FullyConnectedOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return 
flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # FullyConnectedOptions def Init(self, buf, pos): @@ -8251,10 +8175,8 @@ def GetRootAsSoftmaxOptions(cls, buf, offset=0): @classmethod def SoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SoftmaxOptions def Init(self, buf, pos): @@ -8334,10 +8256,8 @@ def GetRootAsConcatenationOptions(cls, buf, offset=0): @classmethod def ConcatenationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ConcatenationOptions def Init(self, buf, pos): @@ -8432,10 +8352,8 @@ def GetRootAsAddOptions(cls, buf, offset=0): @classmethod def AddOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # AddOptions def Init(self, buf, pos): @@ -8530,10 +8448,8 @@ def GetRootAsMulOptions(cls, buf, offset=0): @classmethod def MulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # MulOptions def Init(self, buf, pos): @@ -8613,10 +8529,8 @@ def GetRootAsL2NormOptions(cls, buf, offset=0): @classmethod def L2NormOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # L2NormOptions def Init(self, buf, pos): @@ -8699,10 +8613,8 @@ def LocalResponseNormalizationOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LocalResponseNormalizationOptions def Init(self, buf, pos): @@ -8824,10 +8736,8 @@ def GetRootAsLSTMOptions(cls, buf, offset=0): @classmethod def LSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LSTMOptions def Init(self, buf, pos): @@ -8967,10 +8877,8 @@ def UnidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UnidirectionalSequenceLSTMOptions def Init(self, buf, pos): @@ 
-9135,10 +9043,8 @@ def BidirectionalSequenceLSTMOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BidirectionalSequenceLSTMOptions def Init(self, buf, pos): @@ -9297,10 +9203,8 @@ def GetRootAsResizeBilinearOptions(cls, buf, offset=0): @classmethod def ResizeBilinearOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ResizeBilinearOptions def Init(self, buf, pos): @@ -9399,10 +9303,8 @@ def ResizeNearestNeighborOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ResizeNearestNeighborOptions def Init(self, buf, pos): @@ -9498,10 +9400,8 @@ def GetRootAsCallOptions(cls, buf, offset=0): @classmethod def CallOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # CallOptions def Init(self, buf, pos): @@ -9581,10 +9481,8 @@ def GetRootAsPadOptions(cls, buf, offset=0): @classmethod def PadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # PadOptions def Init(self, buf, pos): @@ -9651,10 +9549,8 @@ def GetRootAsPadV2Options(cls, buf, offset=0): @classmethod def PadV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # PadV2Options def Init(self, buf, pos): @@ -9721,10 +9617,8 @@ def GetRootAsReshapeOptions(cls, buf, offset=0): @classmethod def ReshapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ReshapeOptions def Init(self, buf, pos): @@ -9852,10 +9746,8 @@ def GetRootAsSpaceToBatchNDOptions(cls, buf, offset=0): @classmethod def SpaceToBatchNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SpaceToBatchNDOptions def Init(self, buf, pos): @@ -9922,10 +9814,8 @@ def GetRootAsBatchToSpaceNDOptions(cls, buf, offset=0): @classmethod def 
BatchToSpaceNDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BatchToSpaceNDOptions def Init(self, buf, pos): @@ -9992,10 +9882,8 @@ def GetRootAsSkipGramOptions(cls, buf, offset=0): @classmethod def SkipGramOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SkipGramOptions def Init(self, buf, pos): @@ -10104,10 +9992,8 @@ def GetRootAsSpaceToDepthOptions(cls, buf, offset=0): @classmethod def SpaceToDepthOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SpaceToDepthOptions def Init(self, buf, pos): @@ -10187,10 +10073,8 @@ def GetRootAsDepthToSpaceOptions(cls, buf, offset=0): @classmethod def DepthToSpaceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DepthToSpaceOptions def Init(self, buf, pos): @@ -10270,10 +10154,8 @@ def GetRootAsSubOptions(cls, buf, offset=0): @classmethod def SubOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SubOptions def Init(self, buf, pos): @@ -10368,10 +10250,8 @@ def GetRootAsDivOptions(cls, buf, offset=0): @classmethod def DivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DivOptions def Init(self, buf, pos): @@ -10451,10 +10331,8 @@ def GetRootAsTopKV2Options(cls, buf, offset=0): @classmethod def TopKV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # TopKV2Options def Init(self, buf, pos): @@ -10524,10 +10402,8 @@ def EmbeddingLookupSparseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # EmbeddingLookupSparseOptions def Init(self, buf, pos): @@ -10607,10 +10483,8 @@ def GetRootAsGatherOptions(cls, buf, offset=0): @classmethod def GatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return 
flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # GatherOptions def Init(self, buf, pos): @@ -10704,10 +10578,8 @@ def GetRootAsTransposeOptions(cls, buf, offset=0): @classmethod def TransposeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # TransposeOptions def Init(self, buf, pos): @@ -10774,10 +10646,8 @@ def GetRootAsExpOptions(cls, buf, offset=0): @classmethod def ExpOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ExpOptions def Init(self, buf, pos): @@ -10844,10 +10714,8 @@ def GetRootAsCosOptions(cls, buf, offset=0): @classmethod def CosOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # CosOptions def Init(self, buf, pos): @@ -10914,10 +10782,8 @@ def GetRootAsReducerOptions(cls, buf, offset=0): @classmethod def ReducerOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ReducerOptions def Init(self, buf, pos): @@ -10998,10 +10864,8 @@ def GetRootAsSqueezeOptions(cls, buf, offset=0): @classmethod def SqueezeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SqueezeOptions def Init(self, buf, pos): @@ -11129,10 +10993,8 @@ def GetRootAsSplitOptions(cls, buf, offset=0): @classmethod def SplitOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SplitOptions def Init(self, buf, pos): @@ -11212,10 +11074,8 @@ def GetRootAsSplitVOptions(cls, buf, offset=0): @classmethod def SplitVOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SplitVOptions def Init(self, buf, pos): @@ -11295,10 +11155,8 @@ def GetRootAsStridedSliceOptions(cls, buf, offset=0): @classmethod def StridedSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - 
size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # StridedSliceOptions def Init(self, buf, pos): @@ -11449,10 +11307,8 @@ def GetRootAsLogSoftmaxOptions(cls, buf, offset=0): @classmethod def LogSoftmaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LogSoftmaxOptions def Init(self, buf, pos): @@ -11519,10 +11375,8 @@ def GetRootAsCastOptions(cls, buf, offset=0): @classmethod def CastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # CastOptions def Init(self, buf, pos): @@ -11616,10 +11470,8 @@ def GetRootAsDequantizeOptions(cls, buf, offset=0): @classmethod def DequantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DequantizeOptions def Init(self, buf, pos): @@ -11686,10 +11538,8 @@ def GetRootAsMaximumMinimumOptions(cls, buf, offset=0): @classmethod def MaximumMinimumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # MaximumMinimumOptions def Init(self, buf, pos): @@ -11756,10 +11606,8 @@ def GetRootAsTileOptions(cls, buf, offset=0): @classmethod def TileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # TileOptions def Init(self, buf, pos): @@ -11826,10 +11674,8 @@ def GetRootAsArgMaxOptions(cls, buf, offset=0): @classmethod def ArgMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ArgMaxOptions def Init(self, buf, pos): @@ -11909,10 +11755,8 @@ def GetRootAsArgMinOptions(cls, buf, offset=0): @classmethod def ArgMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ArgMinOptions def Init(self, buf, pos): @@ -11992,10 +11836,8 @@ def GetRootAsGreaterOptions(cls, buf, offset=0): @classmethod def GreaterOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return 
flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # GreaterOptions def Init(self, buf, pos): @@ -12062,10 +11904,8 @@ def GetRootAsGreaterEqualOptions(cls, buf, offset=0): @classmethod def GreaterEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # GreaterEqualOptions def Init(self, buf, pos): @@ -12132,10 +11972,8 @@ def GetRootAsLessOptions(cls, buf, offset=0): @classmethod def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LessOptions def Init(self, buf, pos): @@ -12202,10 +12040,8 @@ def GetRootAsLessEqualOptions(cls, buf, offset=0): @classmethod def LessEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LessEqualOptions def Init(self, buf, pos): @@ -12272,10 +12108,8 @@ def GetRootAsNegOptions(cls, buf, offset=0): @classmethod def NegOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # NegOptions def Init(self, buf, pos): @@ -12342,10 +12176,8 @@ def GetRootAsSelectOptions(cls, buf, offset=0): @classmethod def SelectOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SelectOptions def Init(self, buf, pos): @@ -12412,10 +12244,8 @@ def GetRootAsSliceOptions(cls, buf, offset=0): @classmethod def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SliceOptions def Init(self, buf, pos): @@ -12482,10 +12312,8 @@ def GetRootAsTransposeConvOptions(cls, buf, offset=0): @classmethod def TransposeConvOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # TransposeConvOptions def Init(self, buf, pos): @@ -12622,10 +12450,8 @@ def GetRootAsExpandDimsOptions(cls, buf, offset=0): @classmethod def ExpandDimsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, 
b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ExpandDimsOptions def Init(self, buf, pos): @@ -12692,10 +12518,8 @@ def GetRootAsSparseToDenseOptions(cls, buf, offset=0): @classmethod def SparseToDenseOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SparseToDenseOptions def Init(self, buf, pos): @@ -12776,10 +12600,8 @@ def GetRootAsEqualOptions(cls, buf, offset=0): @classmethod def EqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # EqualOptions def Init(self, buf, pos): @@ -12846,10 +12668,8 @@ def GetRootAsNotEqualOptions(cls, buf, offset=0): @classmethod def NotEqualOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # NotEqualOptions def Init(self, buf, pos): @@ -12916,10 +12736,8 @@ def GetRootAsShapeOptions(cls, buf, offset=0): @classmethod def ShapeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ShapeOptions def Init(self, buf, pos): @@ -12999,10 +12817,8 @@ def GetRootAsRankOptions(cls, buf, offset=0): @classmethod def RankOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # RankOptions def Init(self, buf, pos): @@ -13069,10 +12885,8 @@ def GetRootAsPowOptions(cls, buf, offset=0): @classmethod def PowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # PowOptions def Init(self, buf, pos): @@ -13139,10 +12953,8 @@ def GetRootAsFakeQuantOptions(cls, buf, offset=0): @classmethod def FakeQuantOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # FakeQuantOptions def Init(self, buf, pos): @@ -13265,10 +13077,8 @@ def GetRootAsPackOptions(cls, buf, offset=0): @classmethod def PackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # PackOptions def Init(self, buf, 
pos): @@ -13362,10 +13172,8 @@ def GetRootAsLogicalOrOptions(cls, buf, offset=0): @classmethod def LogicalOrOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LogicalOrOptions def Init(self, buf, pos): @@ -13432,10 +13240,8 @@ def GetRootAsOneHotOptions(cls, buf, offset=0): @classmethod def OneHotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # OneHotOptions def Init(self, buf, pos): @@ -13515,10 +13321,8 @@ def GetRootAsAbsOptions(cls, buf, offset=0): @classmethod def AbsOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # AbsOptions def Init(self, buf, pos): @@ -13585,10 +13389,8 @@ def GetRootAsHardSwishOptions(cls, buf, offset=0): @classmethod def HardSwishOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # HardSwishOptions def Init(self, buf, pos): @@ -13655,10 +13457,8 @@ def GetRootAsLogicalAndOptions(cls, buf, offset=0): @classmethod def LogicalAndOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LogicalAndOptions def Init(self, buf, pos): @@ -13725,10 +13525,8 @@ def GetRootAsLogicalNotOptions(cls, buf, offset=0): @classmethod def LogicalNotOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LogicalNotOptions def Init(self, buf, pos): @@ -13795,10 +13593,8 @@ def GetRootAsUnpackOptions(cls, buf, offset=0): @classmethod def UnpackOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UnpackOptions def Init(self, buf, pos): @@ -13892,10 +13688,8 @@ def GetRootAsFloorDivOptions(cls, buf, offset=0): @classmethod def FloorDivOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # FloorDivOptions def Init(self, buf, pos): @@ -13962,10 +13756,8 @@ def 
GetRootAsSquareOptions(cls, buf, offset=0): @classmethod def SquareOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SquareOptions def Init(self, buf, pos): @@ -14032,10 +13824,8 @@ def GetRootAsZerosLikeOptions(cls, buf, offset=0): @classmethod def ZerosLikeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ZerosLikeOptions def Init(self, buf, pos): @@ -14102,10 +13892,8 @@ def GetRootAsFillOptions(cls, buf, offset=0): @classmethod def FillOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # FillOptions def Init(self, buf, pos): @@ -14172,10 +13960,8 @@ def GetRootAsFloorModOptions(cls, buf, offset=0): @classmethod def FloorModOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # FloorModOptions def Init(self, buf, pos): @@ -14242,10 +14028,8 @@ def GetRootAsRangeOptions(cls, buf, offset=0): @classmethod def RangeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # RangeOptions def Init(self, buf, pos): @@ -14312,10 +14096,8 @@ def GetRootAsLeakyReluOptions(cls, buf, offset=0): @classmethod def LeakyReluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # LeakyReluOptions def Init(self, buf, pos): @@ -14394,14 +14176,10 @@ def GetRootAsSquaredDifferenceOptions(cls, buf, offset=0): return cls.GetRootAs(buf, offset) @classmethod - def SquaredDifferenceOptionsBufferHasIdentifier(cls, - buf, - offset, + def SquaredDifferenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SquaredDifferenceOptions def Init(self, buf, pos): @@ -14468,10 +14246,8 @@ def GetRootAsMirrorPadOptions(cls, buf, offset=0): @classmethod def MirrorPadOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # 
MirrorPadOptions def Init(self, buf, pos): @@ -14551,10 +14327,8 @@ def GetRootAsUniqueOptions(cls, buf, offset=0): @classmethod def UniqueOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UniqueOptions def Init(self, buf, pos): @@ -14634,10 +14408,8 @@ def GetRootAsReverseV2Options(cls, buf, offset=0): @classmethod def ReverseV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ReverseV2Options def Init(self, buf, pos): @@ -14704,10 +14476,8 @@ def GetRootAsAddNOptions(cls, buf, offset=0): @classmethod def AddNOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # AddNOptions def Init(self, buf, pos): @@ -14774,10 +14544,8 @@ def GetRootAsGatherNdOptions(cls, buf, offset=0): @classmethod def GatherNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # GatherNdOptions def Init(self, buf, pos): @@ -14844,10 +14612,8 @@ def GetRootAsWhereOptions(cls, buf, offset=0): @classmethod def WhereOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # WhereOptions def Init(self, buf, pos): @@ -14914,10 +14680,8 @@ def GetRootAsReverseSequenceOptions(cls, buf, offset=0): @classmethod def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ReverseSequenceOptions def Init(self, buf, pos): @@ -15011,10 +14775,8 @@ def GetRootAsMatrixDiagOptions(cls, buf, offset=0): @classmethod def MatrixDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # MatrixDiagOptions def Init(self, buf, pos): @@ -15081,10 +14843,8 @@ def GetRootAsQuantizeOptions(cls, buf, offset=0): @classmethod def QuantizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # QuantizeOptions def Init(self, buf, pos): @@ 
-15151,10 +14911,8 @@ def GetRootAsMatrixSetDiagOptions(cls, buf, offset=0): @classmethod def MatrixSetDiagOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # MatrixSetDiagOptions def Init(self, buf, pos): @@ -15221,10 +14979,8 @@ def GetRootAsIfOptions(cls, buf, offset=0): @classmethod def IfOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # IfOptions def Init(self, buf, pos): @@ -15318,10 +15074,8 @@ def GetRootAsCallOnceOptions(cls, buf, offset=0): @classmethod def CallOnceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # CallOnceOptions def Init(self, buf, pos): @@ -15401,10 +15155,8 @@ def GetRootAsWhileOptions(cls, buf, offset=0): @classmethod def WhileOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # WhileOptions def Init(self, buf, pos): @@ -15501,10 +15253,8 @@ def NonMaxSuppressionV4OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # NonMaxSuppressionV4Options def Init(self, buf, pos): @@ -15574,10 +15324,8 @@ def NonMaxSuppressionV5OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # NonMaxSuppressionV5Options def Init(self, buf, pos): @@ -15644,10 +15392,8 @@ def GetRootAsScatterNdOptions(cls, buf, offset=0): @classmethod def ScatterNdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ScatterNdOptions def Init(self, buf, pos): @@ -15714,10 +15460,8 @@ def GetRootAsSelectV2Options(cls, buf, offset=0): @classmethod def SelectV2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SelectV2Options def Init(self, buf, pos): @@ -15784,10 +15528,8 @@ def GetRootAsDensifyOptions(cls, buf, offset=0): @classmethod def DensifyOptionsBufferHasIdentifier(cls, buf, offset, 
size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DensifyOptions def Init(self, buf, pos): @@ -15854,10 +15596,8 @@ def GetRootAsSegmentSumOptions(cls, buf, offset=0): @classmethod def SegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SegmentSumOptions def Init(self, buf, pos): @@ -15924,10 +15664,8 @@ def GetRootAsBatchMatMulOptions(cls, buf, offset=0): @classmethod def BatchMatMulOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BatchMatMulOptions def Init(self, buf, pos): @@ -16039,10 +15777,8 @@ def GetRootAsCumsumOptions(cls, buf, offset=0): @classmethod def CumsumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # CumsumOptions def Init(self, buf, pos): @@ -16138,10 +15874,8 @@ def GetRootAsBroadcastToOptions(cls, buf, offset=0): @classmethod def BroadcastToOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BroadcastToOptions def Init(self, buf, pos): @@ -16208,10 +15942,8 @@ def GetRootAsRfft2dOptions(cls, buf, offset=0): @classmethod def Rfft2dOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Rfft2dOptions def Init(self, buf, pos): @@ -16278,10 +16010,8 @@ def GetRootAsHashtableOptions(cls, buf, offset=0): @classmethod def HashtableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # HashtableOptions def Init(self, buf, pos): @@ -16389,10 +16119,8 @@ def GetRootAsHashtableFindOptions(cls, buf, offset=0): @classmethod def HashtableFindOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # HashtableFindOptions def Init(self, buf, pos): @@ -16459,10 +16187,8 @@ def GetRootAsHashtableImportOptions(cls, buf, offset=0): @classmethod def HashtableImportOptionsBufferHasIdentifier(cls, buf, offset, 
size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # HashtableImportOptions def Init(self, buf, pos): @@ -16529,10 +16255,8 @@ def GetRootAsHashtableSizeOptions(cls, buf, offset=0): @classmethod def HashtableSizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # HashtableSizeOptions def Init(self, buf, pos): @@ -16599,10 +16323,8 @@ def GetRootAsVarHandleOptions(cls, buf, offset=0): @classmethod def VarHandleOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # VarHandleOptions def Init(self, buf, pos): @@ -16704,10 +16426,8 @@ def GetRootAsReadVariableOptions(cls, buf, offset=0): @classmethod def ReadVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ReadVariableOptions def Init(self, buf, pos): @@ -16774,10 +16494,8 @@ def GetRootAsAssignVariableOptions(cls, buf, offset=0): @classmethod def AssignVariableOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # AssignVariableOptions def Init(self, buf, pos): @@ -16844,10 +16562,8 @@ def GetRootAsRandomOptions(cls, buf, offset=0): @classmethod def RandomOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # RandomOptions def Init(self, buf, pos): @@ -16941,10 +16657,8 @@ def GetRootAsBucketizeOptions(cls, buf, offset=0): @classmethod def BucketizeOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BucketizeOptions def Init(self, buf, pos): @@ -17072,10 +16786,8 @@ def GetRootAsGeluOptions(cls, buf, offset=0): @classmethod def GeluOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # GeluOptions def Init(self, buf, pos): @@ -17159,10 +16871,8 @@ def DynamicUpdateSliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return 
flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DynamicUpdateSliceOptions def Init(self, buf, pos): @@ -17232,10 +16942,8 @@ def UnsortedSegmentProdOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UnsortedSegmentProdOptions def Init(self, buf, pos): @@ -17305,10 +17013,8 @@ def UnsortedSegmentMaxOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UnsortedSegmentMaxOptions def Init(self, buf, pos): @@ -17378,10 +17084,8 @@ def UnsortedSegmentSumOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UnsortedSegmentSumOptions def Init(self, buf, pos): @@ -17448,10 +17152,8 @@ def GetRootAsATan2Options(cls, buf, offset=0): @classmethod def ATan2OptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ATan2Options def Init(self, buf, pos): @@ -17521,10 +17223,8 @@ def UnsortedSegmentMinOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # UnsortedSegmentMinOptions def Init(self, buf, pos): @@ -17591,10 +17291,8 @@ def GetRootAsSignOptions(cls, buf, offset=0): @classmethod def SignOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SignOptions def Init(self, buf, pos): @@ -17661,10 +17359,8 @@ def GetRootAsBitcastOptions(cls, buf, offset=0): @classmethod def BitcastOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BitcastOptions def Init(self, buf, pos): @@ -17731,10 +17427,8 @@ def GetRootAsBitwiseXorOptions(cls, buf, offset=0): @classmethod def BitwiseXorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # 
BitwiseXorOptions def Init(self, buf, pos): @@ -17801,10 +17495,8 @@ def GetRootAsRightShiftOptions(cls, buf, offset=0): @classmethod def RightShiftOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # RightShiftOptions def Init(self, buf, pos): @@ -17871,10 +17563,8 @@ def GetRootAsDilateOptions(cls, buf, offset=0): @classmethod def DilateOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # DilateOptions def Init(self, buf, pos): @@ -17941,10 +17631,8 @@ def GetRootAsReduceWindowOptions(cls, buf, offset=0): @classmethod def ReduceWindowOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # ReduceWindowOptions def Init(self, buf, pos): @@ -18024,10 +17712,8 @@ def GetRootAsGRUOptions(cls, buf, offset=0): @classmethod def GRUOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # GRUOptions def Init(self, buf, pos): @@ -18137,10 +17823,8 @@ def GetRootAsBCQGatherOptions(cls, buf, offset=0): @classmethod def BCQGatherOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BCQGatherOptions def Init(self, buf, pos): @@ -18233,14 +17917,10 @@ def GetRootAsBCQFullyConnectedOptions(cls, buf, offset=0): return cls.GetRootAs(buf, offset) @classmethod - def BCQFullyConnectedOptionsBufferHasIdentifier(cls, - buf, - offset, + def BCQFullyConnectedOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # BCQFullyConnectedOptions def Init(self, buf, pos): @@ -18335,10 +18015,8 @@ def GetRootAsInstanceNormOptions(cls, buf, offset=0): @classmethod def InstanceNormOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # InstanceNormOptions def Init(self, buf, pos): @@ -18433,10 +18111,8 @@ def GetRootAsOperatorCode(cls, buf, offset=0): @classmethod def OperatorCodeBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return 
flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # OperatorCode def Init(self, buf, pos): @@ -18562,10 +18238,8 @@ def GetRootAsOperator(cls, buf, offset=0): @classmethod def OperatorBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Operator def Init(self, buf, pos): @@ -19047,10 +18721,8 @@ def GetRootAsSubGraph(cls, buf, offset=0): @classmethod def SubGraphBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SubGraph def Init(self, buf, pos): @@ -19356,10 +19028,8 @@ def GetRootAsBuffer(cls, buf, offset=0): @classmethod def BufferBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Buffer def Init(self, buf, pos): @@ -19515,10 +19185,8 @@ def GetRootAsMetadata(cls, buf, offset=0): @classmethod def MetadataBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Metadata def Init(self, buf, pos): @@ -19616,10 +19284,8 @@ def GetRootAsTensorMap(cls, buf, offset=0): @classmethod def TensorMapBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # TensorMap def Init(self, buf, pos): @@ -19717,10 +19383,8 @@ def GetRootAsSignatureDef(cls, buf, offset=0): @classmethod def SignatureDefBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # SignatureDef def Init(self, buf, pos): @@ -19928,10 +19592,8 @@ def GetRootAsModel(cls, buf, offset=0): @classmethod def ModelBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x49\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x49\x52\x30", size_prefixed=size_prefixed) # Model def Init(self, buf, pos): diff --git a/tools/circle_plus_gen/schema/circle_traininfo_generated.py b/tools/circle_plus_gen/schema/circle_traininfo_generated.py index 95be5d12856..e74cab5c22b 100644 --- a/tools/circle_plus_gen/schema/circle_traininfo_generated.py +++ b/tools/circle_plus_gen/schema/circle_traininfo_generated.py @@ -79,10 +79,8 @@ def GetRootAsSGDOptions(cls, buf, offset=0): @classmethod def SGDOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return 
flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x54\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x54\x52\x30", size_prefixed=size_prefixed) # SGDOptions def Init(self, buf, pos): @@ -162,10 +160,8 @@ def GetRootAsAdamOptions(cls, buf, offset=0): @classmethod def AdamOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x54\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x54\x52\x30", size_prefixed=size_prefixed) # AdamOptions def Init(self, buf, pos): @@ -288,10 +284,8 @@ def GetRootAsSparseCategoricalCrossentropyOptions(cls, buf, offset=0): @classmethod def SparseCategoricalCrossentropyOptionsBufferHasIdentifier( cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x54\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x54\x52\x30", size_prefixed=size_prefixed) # SparseCategoricalCrossentropyOptions def Init(self, buf, pos): @@ -376,10 +370,8 @@ def CategoricalCrossentropyOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x54\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x54\x52\x30", size_prefixed=size_prefixed) # CategoricalCrossentropyOptions def Init(self, buf, pos): @@ -460,10 +452,8 @@ def GetRootAsMeanSquaredErrorOptions(cls, buf, offset=0): @classmethod def MeanSquaredErrorOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x54\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x54\x52\x30", size_prefixed=size_prefixed) # MeanSquaredErrorOptions def Init(self, buf, pos): @@ -530,10 +520,8 @@ def GetRootAsModelTraining(cls, buf, offset=0): @classmethod def ModelTrainingBufferHasIdentifier(cls, buf, offset, size_prefixed=False): - return flatbuffers.util.BufferHasIdentifier(buf, - offset, - b"\x43\x54\x52\x30", - size_prefixed=size_prefixed) + return flatbuffers.util.BufferHasIdentifier( + buf, offset, b"\x43\x54\x52\x30", size_prefixed=size_prefixed) # ModelTraining def Init(self, buf, pos): diff --git a/tools/extract_weights_from_tflite/extract.py b/tools/extract_weights_from_tflite/extract.py index 4f7755e78c6..7559ba7e869 100755 --- a/tools/extract_weights_from_tflite/extract.py +++ b/tools/extract_weights_from_tflite/extract.py @@ -60,9 +60,8 @@ def printUsage(progname): b = j['buffers'][i] if 'data' in b: if i not in buffer_name_map: - print( - "buffer %d is not found in buffer_name_map. skip printing the buffer..." % - i) + print("buffer %d is not found in buffer_name_map. skip printing the buffer..." 
+                      % i)
                 continue
 
             filename = "%s.npy" % (buffer_name_map[i]['name'])
diff --git a/tools/generate_datafile/tf_dataset_converter/argparser.py b/tools/generate_datafile/tf_dataset_converter/argparser.py
index 2070d04e92e..ded858a425a 100644
--- a/tools/generate_datafile/tf_dataset_converter/argparser.py
+++ b/tools/generate_datafile/tf_dataset_converter/argparser.py
@@ -7,34 +7,36 @@ def _create_parser():
     parser = argparse.ArgumentParser(
         description='Convert a dataset of tensorflow to onert format',
         formatter_class=argparse.RawTextHelpFormatter)
-    parser.add_argument('-s',
-                        '--show-datasets',
-                        action='store_true',
-                        help='show dataset list')
-    parser.add_argument('-d',
-                        '--dataset-name',
-                        type=str,
-                        default='fashion_mnist',
-                        metavar='Dataset',
-                        help='name of dataset to be converted (default: "fashion_mnist")')
-    parser.add_argument('-o',
-                        '--out-dir',
-                        type=str,
-                        default='out',
-                        metavar='Dir',
-                        help='relative path of the files to be created (default: "out")')
-    parser.add_argument('-p',
-                        '--prefix-name',
-                        type=str,
-                        default='',
-                        metavar='Prefix',
-                        help='prefix name of the file to be created (default: "")')
-    parser.add_argument('--split',
-                        nargs='*',
-                        type=str,
-                        default=['train', 'test'],
-                        metavar='Split',
-                        help='Which split of the data to load (default: "train test")')
+    parser.add_argument(
+        '-s', '--show-datasets', action='store_true', help='show dataset list')
+    parser.add_argument(
+        '-d',
+        '--dataset-name',
+        type=str,
+        default='fashion_mnist',
+        metavar='Dataset',
+        help='name of dataset to be converted (default: "fashion_mnist")')
+    parser.add_argument(
+        '-o',
+        '--out-dir',
+        type=str,
+        default='out',
+        metavar='Dir',
+        help='relative path of the files to be created (default: "out")')
+    parser.add_argument(
+        '-p',
+        '--prefix-name',
+        type=str,
+        default='',
+        metavar='Prefix',
+        help='prefix name of the file to be created (default: "")')
+    parser.add_argument(
+        '--split',
+        nargs='*',
+        type=str,
+        default=['train', 'test'],
+        metavar='Split',
+        help='Which split of the data to load (default: "train test")')
     parser.add_argument(
         '--length',
         nargs='*',
@@ -43,14 +45,15 @@
         metavar='N',
         help='Data number for items described in split (default: "1000 100")')
     models = ['mnist', 'mobilenetv2']
-    parser.add_argument('-m',
-                        '--model',
-                        type=str,
-                        default='mnist',
-                        choices=models,
-                        metavar='Model',
-                        help=('Model name to use generated data (default: mnist)\n'
-                              'Supported models: ' + ', '.join(models)))
+    parser.add_argument(
+        '-m',
+        '--model',
+        type=str,
+        default='mnist',
+        choices=models,
+        metavar='Model',
+        help=('Model name to use generated data (default: mnist)\n'
+              'Supported models: ' + ', '.join(models)))
 
     return parser
 
diff --git a/tools/generate_datafile/tf_dataset_converter/datasets.py b/tools/generate_datafile/tf_dataset_converter/datasets.py
index 5ab7f32eb3b..ac5ae17cc58 100644
--- a/tools/generate_datafile/tf_dataset_converter/datasets.py
+++ b/tools/generate_datafile/tf_dataset_converter/datasets.py
@@ -17,6 +17,7 @@ def check(dataset_name):
 
     def preprocess_input(image, label):
         """Preprocess input data for Mnist."""
+
         def _normalize_img(image):
             """Normalize images: `uint8` -> `float32`."""
             return tf.cast(image, tf.float32) / 255.
@@ -34,6 +35,7 @@ def check(dataset_name):
 
     def preprocess_input(image, label):
         """Preprocess input data for MobileNetV2."""
+
         def _resize_img(image):
             _image = tf.cast(image, tf.float32) / 255.
             _image = tf.image.resize_with_crop_or_pad(_image, 224, 224)
@@ -46,6 +48,7 @@ class DatasetLoader():
     '''
     Loader of tensorflow datasets
     '''
+
     def load(self, dataset_name, splits, model_name):
         ds_dict, ds_info = tfds.load(
             dataset_name,
diff --git a/tools/kernel_report/kernel_report.py b/tools/kernel_report/kernel_report.py
index c235ddc4e26..8940e88450a 100755
--- a/tools/kernel_report/kernel_report.py
+++ b/tools/kernel_report/kernel_report.py
@@ -172,10 +172,11 @@ def run(self):
 
 if __name__ == '__main__':
     arg_parser = argparse.ArgumentParser()
-    arg_parser.add_argument("--backends",
-                            type=str,
-                            default='cpu,acl_cl,acl_neon',
-                            help="backend list to report (use comma)")
+    arg_parser.add_argument(
+        "--backends",
+        type=str,
+        default='cpu,acl_cl,acl_neon',
+        help="backend list to report (use comma)")
     arg_parser.add_argument("--md5", action='store_true', help="Print for md5")
 
     args = arg_parser.parse_args()
diff --git a/tools/model_partition_tool/Graph.py b/tools/model_partition_tool/Graph.py
index bec18e07c66..96caf3889b8 100644
--- a/tools/model_partition_tool/Graph.py
+++ b/tools/model_partition_tool/Graph.py
@@ -149,8 +149,8 @@ def initial_partition(self, modelObj, T):
     def summarize(self, T):
         self._logger.info(
             "Session Graph:\n%s",
-            np.array2string(self._session_graph,
-                            formatter={'int': lambda x: '{:>3}'.format(x)}))
+            np.array2string(
+                self._session_graph, formatter={'int': lambda x: '{:>3}'.format(x)}))
         for i in range(self._K):
             self._logger.info("Partition %d : %s, sum weight = %s", i,
                               self._session_ids[i].tolist(), self._session_weights[i])
@@ -441,8 +441,8 @@ def partition_minmax_multiple(self, K=3, nruns=100):
                                    session_ids[i].tolist(), session_weights[i])
         self._Graphlogger.info(
             "Session Graph:\n%s",
-            np.array2string(session_graph, formatter={'int':
-                                                      lambda x: '{:>3}'.format(x)}))
+            np.array2string(
+                session_graph, formatter={'int': lambda x: '{:>3}'.format(x)}))
         self._Graphlogger.info("Edge cut: %d", edge_cut_best)
         self._Graphlogger.info("Memory overhead (bytes): %d", memory_overhead_best)
         output_data = {}
diff --git a/tools/model_partition_tool/test_partition.py b/tools/model_partition_tool/test_partition.py
index c7b6323c9df..553e54c5537 100644
--- a/tools/model_partition_tool/test_partition.py
+++ b/tools/model_partition_tool/test_partition.py
@@ -5,15 +5,13 @@ import Graph
 
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser("test_partition.py",
-                                     description="Example code to partition models")
+    parser = argparse.ArgumentParser(
+        "test_partition.py", description="Example code to partition models")
     parser.add_argument("modelfile", type=str, help="TFLite file with path")
     parser.add_argument("tracefile", type=str, help="Chrome trace file with path")
     parser.add_argument("--num_parts", type=int, default=2, help="Number of partitions")
-    parser.add_argument("--num_runs",
-                        type=int,
-                        default=10,
-                        help="Number of runs (topological orderings)")
+    parser.add_argument(
+        "--num_runs", type=int, default=10, help="Number of runs (topological orderings)")
 
     # Parse arguments
     args = parser.parse_args()
diff --git a/tools/nnpackage_tool/gen_golden/gen_golden.py b/tools/nnpackage_tool/gen_golden/gen_golden.py
index ae5e827ce83..1291ebeebf4 100755
--- a/tools/nnpackage_tool/gen_golden/gen_golden.py
+++ b/tools/nnpackage_tool/gen_golden/gen_golden.py
@@ -39,11 +39,8 @@ def usage():
         'modelfile',
         type=str,
         help='path to modelfile in either graph_def (.pb) or tflite (.tflite)')
-    parser.add_argument('-o',
-                        '--output',
-                        action='store',
-                        dest="out_dir",
-                        help="output
directory") + parser.add_argument( + '-o', '--output', action='store', dest="out_dir", help="output directory") args = parser.parse_args() if len(sys.argv) == 1: @@ -114,8 +111,8 @@ def usage(): config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True with tf.compat.v1.Session(config=config) as sess: - output_values = sess.run(output_names, - feed_dict=dict(zip(input_names, input_values))) + output_values = sess.run( + output_names, feed_dict=dict(zip(input_names, input_values))) elif extension == ".tflite": # load TFLite model and allocate tensors @@ -185,9 +182,8 @@ def usage(): if not dtype.name in supported_dtypes: print("ERR: Supported input types are {}".format(supported_dtypes)) sys.exit(-1) - val_grp.create_dataset(str(idx), - data=input_values[idx], - dtype=h5dtypes[dtype.name]) + val_grp.create_dataset( + str(idx), data=input_values[idx], dtype=h5dtypes[dtype.name]) name_grp.attrs[str(idx)] = input_names[idx] with h5py.File(out_dir + "expected.h5", 'w') as hf: @@ -198,7 +194,6 @@ def usage(): if not dtype.name in supported_dtypes: print("ERR: Supported output types are {}".format(supported_dtypes)) sys.exit(-1) - val_grp.create_dataset(str(idx), - data=output_values[idx], - dtype=h5dtypes[dtype.name]) + val_grp.create_dataset( + str(idx), data=output_values[idx], dtype=h5dtypes[dtype.name]) name_grp.attrs[str(idx)] = output_names[idx] diff --git a/tools/nnpackage_tool/model2nnpkg/model2nnpkg.py b/tools/nnpackage_tool/model2nnpkg/model2nnpkg.py index 4d7ca2ed7e0..0f38620910c 100755 --- a/tools/nnpackage_tool/model2nnpkg/model2nnpkg.py +++ b/tools/nnpackage_tool/model2nnpkg/model2nnpkg.py @@ -98,36 +98,36 @@ def _get_args(): %(prog)s -o out -p addpkg -m a1.tflite a2.tflite -i a1.json a2.json => create nnpkg "addpkg" with models a1.tflite and a2.tflite in out/ ''') - parser.add_argument('-o', - '--outdir', - type=str, - default=os.getcwd(), - metavar='output_directory', - help='set nnpkg output directory') - parser.add_argument('-p', - '--nnpkg-name', - type=str, - metavar='nnpkg_name', - help='set nnpkg output name (default=[1st modelfile name])') - parser.add_argument('-c', - '--config', - type=str, - nargs='+', - default='', - metavar='conf', - help='provide configuration files') - parser.add_argument('-m', - '--models', - type=str, - nargs='+', - metavar='model', - help='provide model files') - parser.add_argument('-i', - '--io-info', - type=str, - nargs='+', - metavar='io_info', - help='provide io info') + parser.add_argument( + '-o', + '--outdir', + type=str, + default=os.getcwd(), + metavar='output_directory', + help='set nnpkg output directory') + parser.add_argument( + '-p', + '--nnpkg-name', + type=str, + metavar='nnpkg_name', + help='set nnpkg output name (default=[1st modelfile name])') + parser.add_argument( + '-c', + '--config', + type=str, + nargs='+', + default='', + metavar='conf', + help='provide configuration files') + parser.add_argument( + '-m', + '--models', + type=str, + nargs='+', + metavar='model', + help='provide model files') + parser.add_argument( + '-i', '--io-info', type=str, nargs='+', metavar='io_info', help='provide io info') args = parser.parse_args() diff --git a/tools/nnpackage_tool/nnpackager/nnpackager.py b/tools/nnpackage_tool/nnpackager/nnpackager.py index 54466b5dc44..a288ef856b6 100755 --- a/tools/nnpackage_tool/nnpackager/nnpackager.py +++ b/tools/nnpackage_tool/nnpackager/nnpackager.py @@ -62,14 +62,13 @@ def compress(path): if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('path', 
type=str, help='the path to nnpackage') - parser.add_argument('-v', - '--verify', - action='store_true', - help="verify nnpackage (default: false)") - parser.add_argument('-c', - '--compress', - action='store_true', - help="compress nnpackage (default: false)") + parser.add_argument( + '-v', '--verify', action='store_true', help="verify nnpackage (default: false)") + parser.add_argument( + '-c', + '--compress', + action='store_true', + help="compress nnpackage (default: false)") args = parser.parse_args() diff --git a/tools/nnpackage_tool/qnf/qnf.py b/tools/nnpackage_tool/qnf/qnf.py index 0cc439e497a..0e027712517 100644 --- a/tools/nnpackage_tool/qnf/qnf.py +++ b/tools/nnpackage_tool/qnf/qnf.py @@ -99,15 +99,11 @@ def _dequantize_output0(data): def makeArgParser(): parser = argparse.ArgumentParser() - parser.add_argument('h5', - type=str, - help='path to h5 file either input or output to model') + parser.add_argument( + 'h5', type=str, help='path to h5 file either input or output to model') parser.add_argument('circle', type=str, help='path to quantized circle model') - parser.add_argument('-o', - '--output', - action='store', - dest="out_path", - help="output file") + parser.add_argument( + '-o', '--output', action='store', dest="out_path", help="output file") group = parser.add_mutually_exclusive_group() group.add_argument( '-q', diff --git a/tools/nnpackage_tool/sth2nnpkgtc/pb_select_graph.py b/tools/nnpackage_tool/sth2nnpkgtc/pb_select_graph.py index 68d0ad2f7ec..24e1f308b85 100755 --- a/tools/nnpackage_tool/sth2nnpkgtc/pb_select_graph.py +++ b/tools/nnpackage_tool/sth2nnpkgtc/pb_select_graph.py @@ -32,17 +32,12 @@ def usage(): parser = argparse.ArgumentParser() parser.add_argument('graph_def', type=str, help='path to graph_def (pb)') parser.add_argument('input_names', type=str, help='input tensor names separated by ,') - parser.add_argument('output_names', - type=str, - help='output tensor names separated by ,') - parser.add_argument('graph_outname', - type=str, - help='graph_def base name for selected subgraph') - parser.add_argument('-o', - '--output', - action='store', - dest="out_dir", - help="output directory") + parser.add_argument( + 'output_names', type=str, help='output tensor names separated by ,') + parser.add_argument( + 'graph_outname', type=str, help='graph_def base name for selected subgraph') + parser.add_argument( + '-o', '--output', action='store', dest="out_dir", help="output directory") args = parser.parse_args() filename = args.graph_def diff --git a/tools/pareto_profiler/estimator/Hlps.py b/tools/pareto_profiler/estimator/Hlps.py index 31f748374b2..ba0925d6fb2 100644 --- a/tools/pareto_profiler/estimator/Hlps.py +++ b/tools/pareto_profiler/estimator/Hlps.py @@ -26,6 +26,7 @@ class Hlps: """ Initialize Runner and Pareto data structure """ + def __init__(self, runner, num_backends, num_samples): self._runner = runner self._num_backends = num_backends @@ -186,10 +187,8 @@ def sigint_handler(signum, frame): round_cnt += 1 utils.progressbar(round_cnt, nsolutions, prefix="% samples computed. 
: ") - self._pareto_obj.update_pareto_solutions(s, - time_val, - memory_val, - explore_flag=True) + self._pareto_obj.update_pareto_solutions( + s, time_val, memory_val, explore_flag=True) for key in self._pareto_obj.get_pareto_keys(): pareto_sample = self._pareto_obj.get_config(key) diff --git a/tools/pareto_profiler/estimator/brute_force_profiler.py b/tools/pareto_profiler/estimator/brute_force_profiler.py index 152cafc6ade..b9988b9f980 100644 --- a/tools/pareto_profiler/estimator/brute_force_profiler.py +++ b/tools/pareto_profiler/estimator/brute_force_profiler.py @@ -7,8 +7,8 @@ from utils import progressbar if __name__ == "__main__": - parser = ProfileArgs(prog="brute_force_profiler.py", - description="Profiles onert_run using oplist") + parser = ProfileArgs( + prog="brute_force_profiler.py", description="Profiles onert_run using oplist") # Parse arguments args = parser.parse_args() modelfile = args.model diff --git a/tools/pareto_profiler/estimator/hlps_sampler.py b/tools/pareto_profiler/estimator/hlps_sampler.py index aae857fb8b0..a4c1e4fd8ee 100644 --- a/tools/pareto_profiler/estimator/hlps_sampler.py +++ b/tools/pareto_profiler/estimator/hlps_sampler.py @@ -66,29 +66,30 @@ def hlps_profiler(modelfile, parser = ProfileArgs( "hlps_on_device.py", description="On-Device Optimizing Profiler for TensorFlowLite Models") - parser.add_argument('--iterations', - type=int, - default=3, - help='Number of iterations, less than 10 should be enough') - parser.add_argument('--samples', - type=int, - default=2000, - help='Number of samples per iteration') - parser.add_argument('--offline', - type=bool, - default=False, - help='Set to True for running over profiled data') + parser.add_argument( + '--iterations', + type=int, + default=3, + help='Number of iterations, less than 10 should be enough') + parser.add_argument( + '--samples', type=int, default=2000, help='Number of samples per iteration') + parser.add_argument( + '--offline', + type=bool, + default=False, + help='Set to True for running over profiled data') parser.add_argument('--profiled_data', type=str, help='Profile file with path') args = parser.parse_args() - hlps_profiler(args.model, - args.run_folder, - num_backends=args.backends, - mode=args.mode, - nruns=args.iterations, - num_samples=args.samples, - dumpfile=args.dumpfile) + hlps_profiler( + args.model, + args.run_folder, + num_backends=args.backends, + mode=args.mode, + nruns=args.iterations, + num_samples=args.samples, + dumpfile=args.dumpfile) t_end = time.time() with open(args.dumpfile, "r") as ifile: dumpdata = json.load(ifile) diff --git a/tools/pareto_profiler/estimator/pareto.py b/tools/pareto_profiler/estimator/pareto.py index fa0e1019f2d..9c62eb35814 100644 --- a/tools/pareto_profiler/estimator/pareto.py +++ b/tools/pareto_profiler/estimator/pareto.py @@ -40,12 +40,10 @@ def update_pareto_solutions(self, sample, exec_time, max_rss, explore_flag=False new_item = True if self._pareto_solutions: for key in list(self._pareto_solutions): - if self._pareto_solutions[key][0] < exec_time and self._pareto_solutions[ - key][1] < max_rss: + if self._pareto_solutions[key][0] < exec_time and self._pareto_solutions[key][1] < max_rss: new_item = False break - elif self._pareto_solutions[key][ - 0] > exec_time and self._pareto_solutions[key][1] > max_rss: + elif self._pareto_solutions[key][0] > exec_time and self._pareto_solutions[key][1] > max_rss: self.add_pareto_entry(sample, exec_time, max_rss, key, explore_flag, True) new_item = False diff --git 
a/tools/pareto_profiler/estimator/profile_args.py b/tools/pareto_profiler/estimator/profile_args.py index 180ff1feebd..4690d127fab 100644 --- a/tools/pareto_profiler/estimator/profile_args.py +++ b/tools/pareto_profiler/estimator/profile_args.py @@ -20,18 +20,18 @@ class ProfileArgs(argparse.ArgumentParser): def __init__(self, *args, **kwargs): super(ProfileArgs, self).__init__(args, kwargs) - self.add_argument('model', - type=str, - default=None, - help='nnpackage name with path') + self.add_argument( + 'model', type=str, default=None, help='nnpackage name with path') self.add_argument('run_folder', type=str, help="path to onert_run executable") - self.add_argument('--mode', - type=str.lower, - choices=["index", "name"], - default="name", - help='Profile by operation index or name') + self.add_argument( + '--mode', + type=str.lower, + choices=["index", "name"], + default="name", + help='Profile by operation index or name') self.add_argument('--backends', type=int, default=2, help='Number of backends') - self.add_argument('--dumpfile', - type=str.lower, - default="/tmp/final_result.json", - help='JSON Dumpfile name with path') + self.add_argument( + '--dumpfile', + type=str.lower, + default="/tmp/final_result.json", + help='JSON Dumpfile name with path') diff --git a/tools/pareto_profiler/estimator/random_sampler.py b/tools/pareto_profiler/estimator/random_sampler.py index e166e9f0b30..7646ea62cf6 100644 --- a/tools/pareto_profiler/estimator/random_sampler.py +++ b/tools/pareto_profiler/estimator/random_sampler.py @@ -13,10 +13,8 @@ if __name__ == "__main__": t_start = time.time() parser = ProfileArgs("random_sampler.py", description="Random sampler") - parser.add_argument('--iterations', - type=int, - default=100, - help='Number of iterations') + parser.add_argument( + '--iterations', type=int, default=100, help='Number of iterations') # Parse arguments args = parser.parse_args() diff --git a/tools/pareto_profiler/generator/gen_oplist.py b/tools/pareto_profiler/generator/gen_oplist.py index c9d667ab1d9..75a74383ae1 100644 --- a/tools/pareto_profiler/generator/gen_oplist.py +++ b/tools/pareto_profiler/generator/gen_oplist.py @@ -114,21 +114,21 @@ def generate_oplist_by_name_size(tflite_file): parser = argparse.ArgumentParser( description='''gen_backend: Generates oplist and uploads to target''', epilog="""Success.""") - parser.add_argument('--auth', - type=str, - default=None, - help='authentication: ') - parser.add_argument('--mode', - type=str.lower, - choices=["index", "name"], - default="name", - help='Profile by operation index or name') + parser.add_argument( + '--auth', type=str, default=None, help='authentication: ') + parser.add_argument( + '--mode', + type=str.lower, + choices=["index", "name"], + default="name", + help='Profile by operation index or name') parser.add_argument('model', type=str, default=None, help='tflite name with path') - parser.add_argument('target', - type=str.lower, - choices=['tizen', 'odroid'], - default="odroid", - help='target name') + parser.add_argument( + 'target', + type=str.lower, + choices=['tizen', 'odroid'], + default="odroid", + help='target name') # Parse arguments args = parser.parse_args() diff --git a/tools/pbfile_tool/extract_subgraph.py b/tools/pbfile_tool/extract_subgraph.py index b04d91c77ae..06135990a9b 100755 --- a/tools/pbfile_tool/extract_subgraph.py +++ b/tools/pbfile_tool/extract_subgraph.py @@ -31,9 +31,10 @@ def extract_subgraph(pb_path, output_node_names): parser = argparse.ArgumentParser(description='Extract subgraph from pb 
file')
     parser.add_argument("input_file", help="pb file to read")
-    parser.add_argument("--output_node_names",
-                        help="A list of strings specifying the destination node names.",
-                        required=True)
+    parser.add_argument(
+        "--output_node_names",
+        help="A list of strings specifying the destination node names.",
+        required=True)
     parser.add_argument("output_file", help="pb file to write")
 
     args = parser.parse_args()
 
diff --git a/tools/pbfile_tool/pb_info.py b/tools/pbfile_tool/pb_info.py
index a236cfa36f8..7add94fa821 100755
--- a/tools/pbfile_tool/pb_info.py
+++ b/tools/pbfile_tool/pb_info.py
@@ -145,9 +145,8 @@ def print_summary(pb_path, optype_substring, name_prefix):
     parser.add_argument(
         "op_subst",
         help="substring of operations. only info of these operations will be printed.")
-    parser.add_argument("--summary",
-                        help="print summary of operations",
-                        action="store_true")
+    parser.add_argument(
+        "--summary", help="print summary of operations", action="store_true")
     parser.add_argument("--name_prefix", help="filtered by specified name prefix")
 
     args = parser.parse_args()
diff --git a/tools/stab/backend_profiler.py b/tools/stab/backend_profiler.py
index 1734317127e..c9d71332df4 100644
--- a/tools/stab/backend_profiler.py
+++ b/tools/stab/backend_profiler.py
@@ -25,6 +25,7 @@ class BackendProfiler():
 
     TODO : Support Android device profiling
     """
+
     def __init__(self, user, ip, nnpackage_dir, num_threads):
         self.remote_ssh = RemoteSSH(user, ip, nnpackage_dir, num_threads)
         self.backend_op_list = OpListParser().parse()
diff --git a/tools/stab/backend_scheduler.py b/tools/stab/backend_scheduler.py
index 57d1a15e183..f2cf463b9a9 100644
--- a/tools/stab/backend_scheduler.py
+++ b/tools/stab/backend_scheduler.py
@@ -27,6 +27,7 @@ class BackendScheduler:
 
     TODO : Use permutation time for better scheduling
     """
+
    def __init__(self, nnpkg_dir, num_threads):
        self.nnpkg_dir = Path(nnpkg_dir).resolve()
        self.num_threads = num_threads
diff --git a/tools/stab/nnpkg_helper.py b/tools/stab/nnpkg_helper.py
index 816d1865d72..7e68760ffaf 100644
--- a/tools/stab/nnpkg_helper.py
+++ b/tools/stab/nnpkg_helper.py
@@ -23,6 +23,7 @@ class NnpkgHelper:
     """
     Helper class for nnpackage
     """
+
     def __init__(self):
         self.config_name = 'config.cfg'
 
diff --git a/tools/stab/op_list_parser.py b/tools/stab/op_list_parser.py
index 3dbe20c5d40..d9fba508b96 100644
--- a/tools/stab/op_list_parser.py
+++ b/tools/stab/op_list_parser.py
@@ -23,6 +23,7 @@ class OpListParser():
 
     TODO : Reads supported tensor type for each operation (FP32 or INT8)
     """
+
     def __init__(self):
         self.file_name = "op_list.txt"
         self.op_list_file = Path(__file__).parent / self.file_name
diff --git a/tools/stab/remote.py b/tools/stab/remote.py
index 957c66cd227..2a22f936afe 100644
--- a/tools/stab/remote.py
+++ b/tools/stab/remote.py
@@ -24,6 +24,7 @@ class RemoteSSH():
 
     TODO : Using SSH library instead of direct ssh call
     """
+
     def __init__(self, user, ip, nnpkg_dir, num_threads):
         self.base_dir = Path('/tmp/ONE')
         self.trace_dir = 'traces'
diff --git a/tools/stab/stab.py b/tools/stab/stab.py
index 45bd7075764..7a069df5d29 100644
--- a/tools/stab/stab.py
+++ b/tools/stab/stab.py
@@ -35,41 +35,39 @@ def main(args):
     optional = arg_parser.add_argument_group('optional arguments')
 
     # Add back help
-    optional.add_argument('-h',
-                          '--help',
-                          action='help',
-                          default=argparse.SUPPRESS,
-                          help='show this help message and exit')
-    required.add_argument("--nnpackage",
-                          type=str,
-                          required=True,
-                          help="nnpackage folder to profile")
-    required.add_argument("--ip",
-                          type=str,
-                          required=True,
- help="IP address of remote client") - optional.add_argument("-n", - "--num_threads", - type=int, - default=1, - help="Number of threads used by one runtime") + optional.add_argument( + '-h', + '--help', + action='help', + default=argparse.SUPPRESS, + help='show this help message and exit') + required.add_argument( + "--nnpackage", type=str, required=True, help="nnpackage folder to profile") + required.add_argument( + "--ip", type=str, required=True, help="IP address of remote client") + optional.add_argument( + "-n", + "--num_threads", + type=int, + default=1, + help="Number of threads used by one runtime") optional.add_argument("-u", "--user", type=str, help="User of remote client") - optional.add_argument("-v", - "--verbose", - action='store_const', - dest="verbose_level", - default=logging.INFO, - const=logging.DEBUG, - help="Print verbose message") - optional.add_argument("--no-profile", - dest='profile', - action='store_false', - help="Disable profiling") + optional.add_argument( + "-v", + "--verbose", + action='store_const', + dest="verbose_level", + default=logging.INFO, + const=logging.DEBUG, + help="Print verbose message") + optional.add_argument( + "--no-profile", dest='profile', action='store_false', help="Disable profiling") optional.set_defaults(profile=True) args = arg_parser.parse_args() - logging.basicConfig(stream=sys.stdout, - level=args.verbose_level, - format="[%(levelname).5s] %(message)s") + logging.basicConfig( + stream=sys.stdout, + level=args.verbose_level, + format="[%(levelname).5s] %(message)s") main(args) diff --git a/tools/tensorflow_model_freezer/base_freezer.py b/tools/tensorflow_model_freezer/base_freezer.py index 788976e8110..a365a780633 100755 --- a/tools/tensorflow_model_freezer/base_freezer.py +++ b/tools/tensorflow_model_freezer/base_freezer.py @@ -75,25 +75,26 @@ def createTFInput(self, tensor, input_list): tf_tensor = tf.placeholder(shape=[], dtype=tensor.getDType()) input_list.append(tf_tensor) else: - tf_tensor = tf.constant(value=tensor.getConstVal(), - dtype=tensor.getDType()) + tf_tensor = tf.constant( + value=tensor.getConstVal(), dtype=tensor.getDType()) else: if (tensor.getConstVal() == None): - tf_tensor = tf.placeholder(shape=tensor.getShape(), - dtype=tensor.getDType()) + tf_tensor = tf.placeholder( + shape=tensor.getShape(), dtype=tensor.getDType()) input_list.append(tf_tensor) else: - tf_tensor = tf.constant(shape=tensor.getShape(), - value=tensor.getConstVal(), - dtype=tensor.getDType()) + tf_tensor = tf.constant( + shape=tensor.getShape(), + value=tensor.getConstVal(), + dtype=tensor.getDType()) return tf_tensor def saveRelatedFiles(self, sess, input_node_list, output_node_list, fn_prefix): # saves pb, pbtxt, chpt files and then freeze graph under top_node_name into directory # produce pb, pbtxt, and ckpt files - (pb_path, pbtxt_path, - checkpoint_path) = util.savePbAndCkpt(sess, self.getOutputDirectory(), fn_prefix) + (pb_path, pbtxt_path, checkpoint_path) = util.savePbAndCkpt( + sess, self.getOutputDirectory(), fn_prefix) print("") print("# 1. 
Created Tensorflow model files :\n\t-{}\n\t-{}\n\t-{}\n".format( @@ -169,14 +170,12 @@ def createSaveFreezeModel(self): print("# files will be saved into " + self.getOutputDirectory()) # build model - (input_node_list, - output_node_list) = self.buildModel(sess, test_cases.get(tc_name), - tc_name) + (input_node_list, output_node_list) = self.buildModel( + sess, test_cases.get(tc_name), tc_name) # Now, save to proto buffer format and checkpoint - (pb_path, frozen_pb_path, - tflite_path) = self.saveRelatedFiles(sess, input_node_list, - output_node_list, tc_name) + (pb_path, frozen_pb_path, tflite_path) = self.saveRelatedFiles( + sess, input_node_list, output_node_list, tc_name) sess.close() # when there is not test cases but the model itself @@ -192,12 +191,11 @@ def createSaveFreezeModel(self): print("# files will be saved into " + self.getOutputDirectory()) # build model - (input_node_list, - output_node_list) = self.buildModel(sess, test_cases.get(tc_name), tc_name) + (input_node_list, output_node_list) = self.buildModel( + sess, test_cases.get(tc_name), tc_name) # Now, save to proto buffer format and checkpoint - (pb_path, frozen_pb_path, - tflite_path) = self.saveRelatedFiles(sess, input_node_list, output_node_list, - tc_name) + (pb_path, frozen_pb_path, tflite_path) = self.saveRelatedFiles( + sess, input_node_list, output_node_list, tc_name) sess.close() diff --git a/tools/tensorflow_model_freezer/model_freezer_util.py b/tools/tensorflow_model_freezer/model_freezer_util.py index 7494588e2c0..3fdbba7854b 100755 --- a/tools/tensorflow_model_freezer/model_freezer_util.py +++ b/tools/tensorflow_model_freezer/model_freezer_util.py @@ -123,9 +123,8 @@ def savePbAndCkpt(sess, directory, fn_prefix): saver = tf.train.Saver() saver.save(sess, os.path.join(directory, 'checkoiint', fn_prefix + '.ckpt')) - return (os.path.join(directory, - fn_prefix + '.pb'), os.path.join(directory, - fn_prefix + '.pbtxt'), + return (os.path.join(directory, fn_prefix + '.pb'), + os.path.join(directory, fn_prefix + '.pbtxt'), os.path.join(directory, 'checkoiint', fn_prefix + '.ckpt')) diff --git a/tools/tensorflow_model_freezer/sample/ARGMAX_gen.py b/tools/tensorflow_model_freezer/sample/ARGMAX_gen.py index b1ee003ecef..0fa019a2ab7 100755 --- a/tools/tensorflow_model_freezer/sample/ARGMAX_gen.py +++ b/tools/tensorflow_model_freezer/sample/ARGMAX_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for MUL ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -68,9 +69,8 @@ def buildModel(self, sess, test_case_tensor, tc_name): # The next lines insert such (CONST) to prevent such error. 
# So, Graph.pb/pbtxt contains this garbage info, # but this garbage info will be removed in Graph_frozen.pb/pbtxt - garbage = tf.get_variable("garbage", [1], - dtype=tf.float32, - initializer=tf.zeros_initializer()) + garbage = tf.get_variable( + "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer()) init_op = tf.global_variables_initializer() garbage_value = [0] sess.run(tf.assign(garbage, garbage_value)) diff --git a/tools/tensorflow_model_freezer/sample/ARGMIN_gen.py b/tools/tensorflow_model_freezer/sample/ARGMIN_gen.py index 539a59122a1..9406d425503 100755 --- a/tools/tensorflow_model_freezer/sample/ARGMIN_gen.py +++ b/tools/tensorflow_model_freezer/sample/ARGMIN_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for MUL ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -68,9 +69,8 @@ def buildModel(self, sess, test_case_tensor, tc_name): # The next lines insert such (CONST) to prevent such error. # So, Graph.pb/pbtxt contains this garbage info, # but this garbage info will be removed in Graph_frozen.pb/pbtxt - garbage = tf.get_variable("garbage", [1], - dtype=tf.float32, - initializer=tf.zeros_initializer()) + garbage = tf.get_variable( + "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer()) init_op = tf.global_variables_initializer() garbage_value = [0] sess.run(tf.assign(garbage, garbage_value)) diff --git a/tools/tensorflow_model_freezer/sample/DIV_gen.py b/tools/tensorflow_model_freezer/sample/DIV_gen.py index fa0a191dbf8..f4f319f7333 100755 --- a/tools/tensorflow_model_freezer/sample/DIV_gen.py +++ b/tools/tensorflow_model_freezer/sample/DIV_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for div ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -109,9 +110,8 @@ def buildModel(self, sess, test_case_tensor, tc_name): # The next lines insert such (CONST) to prevent such error. # So, Graph.pb/pbtxt contains this garbage info, # but this garbage info will be removed in Graph_frozen.pb/pbtxt - garbage = tf.get_variable("garbage", [1], - dtype=tf.float32, - initializer=tf.zeros_initializer()) + garbage = tf.get_variable( + "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer()) init_op = tf.global_variables_initializer() garbage_value = [0] sess.run(tf.assign(garbage, garbage_value)) diff --git a/tools/tensorflow_model_freezer/sample/LOGICAL_AND_gen.py b/tools/tensorflow_model_freezer/sample/LOGICAL_AND_gen.py index 1568755b187..fdd4c45a75c 100755 --- a/tools/tensorflow_model_freezer/sample/LOGICAL_AND_gen.py +++ b/tools/tensorflow_model_freezer/sample/LOGICAL_AND_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for MUL ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -62,9 +63,10 @@ def buildModel(self, sess, test_case_tensor, tc_name): x_tensor = self.createTFInput(test_case_tensor[0], input_list) y_tensor = self.createTFInput(test_case_tensor[1], input_list) - output_node = tf.logical_and(tf.greater(x_tensor, tf.constant(0.0)), - tf.less(y_tensor, tf.constant(1.0)), - name=tc_name) + output_node = tf.logical_and( + tf.greater(x_tensor, tf.constant(0.0)), + tf.less(y_tensor, tf.constant(1.0)), + name=tc_name) # ------ modify UNTIL here for your model -------# @@ -72,9 +74,8 @@ def buildModel(self, sess, test_case_tensor, tc_name): # The next lines insert such (CONST) to prevent such error. 
# So, Graph.pb/pbtxt contains this garbage info, # but this garbage info will be removed in Graph_frozen.pb/pbtxt - garbage = tf.get_variable("garbage", [1], - dtype=tf.float32, - initializer=tf.zeros_initializer()) + garbage = tf.get_variable( + "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer()) init_op = tf.global_variables_initializer() garbage_value = [0] sess.run(tf.assign(garbage, garbage_value)) diff --git a/tools/tensorflow_model_freezer/sample/LOGICAL_NOT_gen.py b/tools/tensorflow_model_freezer/sample/LOGICAL_NOT_gen.py index d9713601523..297a5aca2d9 100755 --- a/tools/tensorflow_model_freezer/sample/LOGICAL_NOT_gen.py +++ b/tools/tensorflow_model_freezer/sample/LOGICAL_NOT_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for MUL ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -68,9 +69,8 @@ def buildModel(self, sess, test_case_tensor, tc_name): # The next lines insert such (CONST) to prevent such error. # So, Graph.pb/pbtxt contains this garbage info, # but this garbage info will be removed in Graph_frozen.pb/pbtxt - garbage = tf.get_variable("garbage", [1], - dtype=tf.float32, - initializer=tf.zeros_initializer()) + garbage = tf.get_variable( + "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer()) init_op = tf.global_variables_initializer() garbage_value = [0] sess.run(tf.assign(garbage, garbage_value)) diff --git a/tools/tensorflow_model_freezer/sample/LOGICAL_OR_gen.py b/tools/tensorflow_model_freezer/sample/LOGICAL_OR_gen.py index 4ed0c9f23a2..70b89798959 100755 --- a/tools/tensorflow_model_freezer/sample/LOGICAL_OR_gen.py +++ b/tools/tensorflow_model_freezer/sample/LOGICAL_OR_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for MUL ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -62,18 +63,18 @@ def buildModel(self, sess, test_case_tensor, tc_name): x_tensor = self.createTFInput(test_case_tensor[0], input_list) y_tensor = self.createTFInput(test_case_tensor[1], input_list) - output_node = tf.logical_or(tf.greater(x_tensor, tf.constant(0.0)), - tf.less(y_tensor, tf.constant(1.0)), - name=tc_name) + output_node = tf.logical_or( + tf.greater(x_tensor, tf.constant(0.0)), + tf.less(y_tensor, tf.constant(1.0)), + name=tc_name) # ------ modify UNTIL here for your model -------# # Note if don't have any CONST value, creating checkpoint file fails. # The next lines insert such (CONST) to prevent such error. # So, Graph.pb/pbtxt contains this garbage info, # but this garbage info will be removed in Graph_frozen.pb/pbtxt - garbage = tf.get_variable("garbage", [1], - dtype=tf.float32, - initializer=tf.zeros_initializer()) + garbage = tf.get_variable( + "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer()) init_op = tf.global_variables_initializer() garbage_value = [0] sess.run(tf.assign(garbage, garbage_value)) diff --git a/tools/tensorflow_model_freezer/sample/MUL_gen.py b/tools/tensorflow_model_freezer/sample/MUL_gen.py index 85acf7c56cd..0c7056b824b 100755 --- a/tools/tensorflow_model_freezer/sample/MUL_gen.py +++ b/tools/tensorflow_model_freezer/sample/MUL_gen.py @@ -28,6 +28,7 @@ class Gen(base.BaseFreezer): ''' class to generate tflite files for MUL ''' + def __init__(self, path): super(Gen, self).__init__(path) @@ -89,9 +90,8 @@ def buildModel(self, sess, test_case_tensor, tc_name): # The next lines insert such (CONST) to prevent such error. 
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
diff --git a/tools/tensorflow_model_freezer/sample/Operation_gen.py b/tools/tensorflow_model_freezer/sample/Operation_gen.py
index 68326e4a230..3a810e53d5a 100755
--- a/tools/tensorflow_model_freezer/sample/Operation_gen.py
+++ b/tools/tensorflow_model_freezer/sample/Operation_gen.py
@@ -50,9 +50,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
@@ -96,9 +95,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
@@ -136,9 +134,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
@@ -176,9 +173,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
diff --git a/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py b/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py
index 12a176d749c..a4d802ec528 100755
--- a/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py
+++ b/tools/tensorflow_model_freezer/sample/SQUEEZE_gen.py
@@ -28,6 +28,7 @@ class Gen(base.BaseFreezer):
     '''
     class to generate tflite files for Squeeze
     '''
+
     def __init__(self, path):
         super(Gen, self).__init__(path)
 
@@ -79,8 +80,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         if len(test_case_tensor) == 1:
             output_node = tf.squeeze(input=x_tensor, name=tc_name)  # do not modify name
         else:
-            output_node = tf.squeeze(input=x_tensor, axis=axis_tensor,
-                                     name=tc_name)  # do not modify name
+            output_node = tf.squeeze(
+                input=x_tensor, axis=axis_tensor, name=tc_name)  # do not modify name
 
         # ------ modify UNTIL here for your model -------#
 
@@ -88,9 +89,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
diff --git a/tools/tensorflow_model_freezer/sample/STACK_gen.py b/tools/tensorflow_model_freezer/sample/STACK_gen.py
index 004a54dc0f1..3eee6240cbf 100755
--- a/tools/tensorflow_model_freezer/sample/STACK_gen.py
+++ b/tools/tensorflow_model_freezer/sample/STACK_gen.py
@@ -28,6 +28,7 @@ class Gen(base.BaseFreezer):
     '''
     class to generate tflite files for MUL
     '''
+
     def __init__(self, path):
         super(Gen, self).__init__(path)
 
@@ -69,9 +70,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
diff --git a/tools/tensorflow_model_freezer/sample/TOPK_gen.py b/tools/tensorflow_model_freezer/sample/TOPK_gen.py
index eeee1944e9f..27b6f60a8b3 100755
--- a/tools/tensorflow_model_freezer/sample/TOPK_gen.py
+++ b/tools/tensorflow_model_freezer/sample/TOPK_gen.py
@@ -28,6 +28,7 @@ class Gen(base.BaseFreezer):
     '''
     class to generate tflite file for TOPK
     '''
+
     def __init__(self, path):
         super(Gen, self).__init__(path)
 
@@ -83,9 +84,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
diff --git a/tools/tensorflow_model_freezer/sample/UNSTACK_gen.py b/tools/tensorflow_model_freezer/sample/UNSTACK_gen.py
index 42f5ef5465e..3cee7459f50 100755
--- a/tools/tensorflow_model_freezer/sample/UNSTACK_gen.py
+++ b/tools/tensorflow_model_freezer/sample/UNSTACK_gen.py
@@ -28,6 +28,7 @@ class Gen(base.BaseFreezer):
     '''
     class to generate tflite files for MUL
     '''
+
     def __init__(self, path):
         super(Gen, self).__init__(path)
 
@@ -68,9 +69,8 @@ def buildModel(self, sess, test_case_tensor, tc_name):
         # The next lines insert such (CONST) to prevent such error.
         # So, Graph.pb/pbtxt contains this garbage info,
         # but this garbage info will be removed in Graph_frozen.pb/pbtxt
-        garbage = tf.get_variable("garbage", [1],
-                                  dtype=tf.float32,
-                                  initializer=tf.zeros_initializer())
+        garbage = tf.get_variable(
+            "garbage", [1], dtype=tf.float32, initializer=tf.zeros_initializer())
         init_op = tf.global_variables_initializer()
         garbage_value = [0]
         sess.run(tf.assign(garbage, garbage_value))
diff --git a/tools/tflitefile_tool/model_parser.py b/tools/tflitefile_tool/model_parser.py
index b74c3b29c36..76c43acfc50 100755
--- a/tools/tflitefile_tool/model_parser.py
+++ b/tools/tflitefile_tool/model_parser.py
@@ -86,29 +86,24 @@ def SaveSubgraph(option, subg):
 if __name__ == '__main__':
     # Define argument and read
     arg_parser = argparse.ArgumentParser()
-    arg_parser.add_argument("input_file",
-                            type=argparse.FileType('rb'),
-                            help="tflite file to read")
-    arg_parser.add_argument('-v',
-                            '--verbose',
-                            type=int,
-                            default=1,
-                            help="set print level (0~1, default: 1)")
-    arg_parser.add_argument('-t',
-                            '--tensor',
-                            nargs='*',
-                            help="tensor ID to print information (default: all)")
-    arg_parser.add_argument('-o',
-                            '--operator',
-                            nargs='*',
-                            help="operator ID to print information (default: all)")
-    arg_parser.add_argument('-c',
-                            '--config',
-                            action='store_true',
-                            help="Save the configuration file per operator")
-    arg_parser.add_argument('-p',
-                            '--prefix',
-                            help="file prefix to be saved (with -c/--config option)")
+    arg_parser.add_argument(
+        "input_file", type=argparse.FileType('rb'), help="tflite file to read")
+    arg_parser.add_argument(
+        '-v', '--verbose', type=int, default=1, help="set print level (0~1, default: 1)")
+    arg_parser.add_argument(
+        '-t', '--tensor', nargs='*', help="tensor ID to print information (default: all)")
+    arg_parser.add_argument(
+        '-o',
+        '--operator',
+        nargs='*',
+        help="operator ID to print information (default: all)")
+    arg_parser.add_argument(
+        '-c',
+        '--config',
+        action='store_true',
+        help="Save the configuration file per operator")
+    arg_parser.add_argument(
+        '-p', '--prefix', help="file prefix to be saved (with -c/--config option)")
     args = arg_parser.parse_args()
 
     option = MainOption(args)
diff --git a/tools/tflitefile_tool/parser/tflite/tflite_option.py b/tools/tflitefile_tool/parser/tflite/tflite_option.py
index 8d34ee26987..b85fbae9023 100644
--- a/tools/tflitefile_tool/parser/tflite/tflite_option.py
+++ b/tools/tflitefile_tool/parser/tflite/tflite_option.py
@@ -64,8 +64,9 @@ def GetStringOptions(op_name, options):
     if (op_name == "AVERAGE_POOL_2D" or op_name == "MAX_POOL_2D"):
         return "{}, {}, {}".format(
             "Filter W:H = {}:{}".format(options.FilterWidth(), options.FilterHeight()),
-            "Stride W:H = {}:{}".format(options.StrideW(), options.StrideH()),
-            "Padding = {}".format(GetStringPadding(options)))
+            "Stride W:H = {}:{}".format(options.StrideW(),
+                                        options.StrideH()), "Padding = {}".format(
+                                            GetStringPadding(options)))
     elif (op_name == "CONV_2D"):
         return "{}, {}, {}".format(
             "Stride W:H = {}:{}".format(options.StrideW(), options.StrideH()),
diff --git a/tools/tflitefile_tool/printer/string_builder.py b/tools/tflitefile_tool/printer/string_builder.py
index c5d8d9b2f32..d7654205a55 100644
--- a/tools/tflitefile_tool/printer/string_builder.py
+++ b/tools/tflitefile_tool/printer/string_builder.py
@@ -76,8 +76,8 @@ def GetStringTensor(tensor):
     buffer = ["("]
     if tensor.buffer is not None:
-        buffer.append("{:5}: ".format(CHAR_SYMBOLS['buffer'] +
-                                      str(tensor.buffer_index)))
+        buffer.append(
+            "{:5}: ".format(CHAR_SYMBOLS['buffer'] + str(tensor.buffer_index)))
         # if too big, just skip it.
         if tensor.buffer.size > 4:
             buffer.append("".join(['[' for _ in range(tensor.buffer.ndim)]))
@@ -85,11 +85,12 @@ def GetStringTensor(tensor):
             buffer.append("".join([']' for _ in range(tensor.buffer.ndim)]))
         else:
             buffer.append(
-                np.array2string(tensor.buffer,
-                                precision=3,
-                                separator=', ',
-                                threshold=4,
-                                edgeitems=2))
+                np.array2string(
+                    tensor.buffer,
+                    precision=3,
+                    separator=', ',
+                    threshold=4,
+                    edgeitems=2))
     else:
         buffer.append("Empty")
     buffer.append(")")
diff --git a/tools/tflitefile_tool/select_operator.py b/tools/tflitefile_tool/select_operator.py
index 41bf26a9f03..5ec2b8b2f4c 100755
--- a/tools/tflitefile_tool/select_operator.py
+++ b/tools/tflitefile_tool/select_operator.py
@@ -1027,9 +1027,8 @@ def GenerateOperator(new_builder, selected_operator, used_tensors_dic, used_opco
     builtin_option_type = selected_operator.BuiltinOptionsType()
     if builtin_option_type != 0:
         selected_builtin_option = selected_operator.BuiltinOptions()
-        new_builtin_option = GenerateBuiltinOption(new_builder, selected_builtin_option,
-                                                   builtin_option_type,
-                                                   used_subgraphs_dic)
+        new_builtin_option = GenerateBuiltinOption(
+            new_builder, selected_builtin_option, builtin_option_type, used_subgraphs_dic)
 
     # Create custum option vector
     custom_option_num = selected_operator.CustomOptionsLength()
@@ -1162,10 +1161,9 @@ def GenerateSubgraphs(args, new_builder, sample_model, operator_list, new_input_
         subg_output_tensors = subg.OutputsAsNumpy()
         subg_tensors = range(subg.TensorsLength())
         subg_tensors_dic = {tensor_idx: tensor_idx for tensor_idx in subg_tensors}
-        subg_buffers_dic = {
-            (subg.Tensors(idx)).Buffer(): (subg.Tensors(idx)).Buffer()
-            for idx in subg_tensors
-        }
+        subg_buffers_dic = {(subg.Tensors(idx)).Buffer():
+                            (subg.Tensors(idx)).Buffer()
+                            for idx in subg_tensors}
         new_subgraph = GenerateSubgraph(new_builder, subg, subg_opperator_idx_list,
                                         subg_input_tensors, subg_output_tensors,
                                         subg_tensors_dic, subg_buffers_dic,
@@ -1471,26 +1469,25 @@ def main(args):
 
 if __name__ == '__main__':
     # Define argument and read
     arg_parser = argparse.ArgumentParser()
-    arg_parser.add_argument("input_model",
-                            type=argparse.FileType('rb'),
-                            help="input tflite model file to read")
-    arg_parser.add_argument("opcode_list",
-                            type=argparse.FileType('r'),
-                            help="text file including selected operator list")
-    arg_parser.add_argument("output_model",
-                            type=argparse.FileType('wb'),
-                            help="output tflite model file")
-    arg_parser.add_argument('-g',
-                            '--subgraph',
-                            type=int,
-                            default=0,
help="subgraph to use (default: 0)") - arg_parser.add_argument('-s', - '--store-io-info', - type=str, - required=False, - default="", - help="Path to io information to be stored") + arg_parser.add_argument( + "input_model", + type=argparse.FileType('rb'), + help="input tflite model file to read") + arg_parser.add_argument( + "opcode_list", + type=argparse.FileType('r'), + help="text file including selected operator list") + arg_parser.add_argument( + "output_model", type=argparse.FileType('wb'), help="output tflite model file") + arg_parser.add_argument( + '-g', '--subgraph', type=int, default=0, help="subgraph to use (default: 0)") + arg_parser.add_argument( + '-s', + '--store-io-info', + type=str, + required=False, + default="", + help="Path to io information to be stored") # TODO # Select multiple subgraph diff --git a/tools/tflitefile_tool/tests/test_string_builder.py b/tools/tflitefile_tool/tests/test_string_builder.py index a7112b2029e..97a580967cc 100644 --- a/tools/tflitefile_tool/tests/test_string_builder.py +++ b/tools/tflitefile_tool/tests/test_string_builder.py @@ -33,24 +33,28 @@ def test_ConvertBytesToHuman(self): self.assertEqual(ConvertBytesToHuman(bytes), 0) bytes = 1 - self.assertEqual(ConvertBytesToHuman(bytes), - format_str % dict(symb=SYMBOLS[0], val=(bytes))) + self.assertEqual( + ConvertBytesToHuman(bytes), format_str % dict(symb=SYMBOLS[0], val=(bytes))) bytes = 1024 - self.assertEqual(ConvertBytesToHuman(bytes), - format_str % dict(symb=SYMBOLS[1], val=(bytes / 1024))) + self.assertEqual( + ConvertBytesToHuman(bytes), + format_str % dict(symb=SYMBOLS[1], val=(bytes / 1024))) bytes = 1024**2 - self.assertEqual(ConvertBytesToHuman(bytes), - format_str % dict(symb=SYMBOLS[2], val=(bytes / (1024**2)))) + self.assertEqual( + ConvertBytesToHuman(bytes), + format_str % dict(symb=SYMBOLS[2], val=(bytes / (1024**2)))) bytes = 1024**3 - self.assertEqual(ConvertBytesToHuman(bytes), - format_str % dict(symb=SYMBOLS[3], val=(bytes / (1024**3)))) + self.assertEqual( + ConvertBytesToHuman(bytes), + format_str % dict(symb=SYMBOLS[3], val=(bytes / (1024**3)))) bytes = 1024**4 - self.assertEqual(ConvertBytesToHuman(bytes), - format_str % dict(symb=SYMBOLS[4], val=(bytes / (1024**4)))) + self.assertEqual( + ConvertBytesToHuman(bytes), + format_str % dict(symb=SYMBOLS[4], val=(bytes / (1024**4)))) # TODO: More tests diff --git a/tools/tflitefile_tool/tests/test_tflite_parser.py b/tools/tflitefile_tool/tests/test_tflite_parser.py index 85fe66d380f..dd1447a8a73 100644 --- a/tools/tflitefile_tool/tests/test_tflite_parser.py +++ b/tools/tflitefile_tool/tests/test_tflite_parser.py @@ -39,8 +39,8 @@ def test_Parse(self): self.assertEqual(len(subg.outputs), tf_subgraph.OutputsLength()) # if there is optional tensors, this assert could be wrong self.assertEqual(len(subg.tensors_map.keys()), tf_subgraph.TensorsLength()) - self.assertEqual(len(subg.operators_map.keys()), - tf_subgraph.OperatorsLength()) + self.assertEqual( + len(subg.operators_map.keys()), tf_subgraph.OperatorsLength()) # because TEST_MODEL_PATH has an op(ADD) self.assertEqual(len(subg.optypes_map.keys()), tf_subgraph.OperatorsLength()) diff --git a/tools/tflkit/summarize_pb.py b/tools/tflkit/summarize_pb.py index cadaa3c3ad1..bdc6b252c4d 100755 --- a/tools/tflkit/summarize_pb.py +++ b/tools/tflkit/summarize_pb.py @@ -6,6 +6,7 @@ class cd: """Context manager for changing the current working directory""" + def __init__(self, newPath): self.newPath = os.path.expanduser(newPath) @@ -65,11 +66,12 @@ def SummarizeGraph(args): vstr = 
"" PrintName(args.input_file) with cd(args.tensorflow_path): - proc = subprocess.Popen([ - 'bazel-bin/tensorflow/tools/graph_transforms/summarize_graph', - '--in_graph=' + args.input_file - ], - stdout=subprocess.PIPE) + proc = subprocess.Popen( + [ + 'bazel-bin/tensorflow/tools/graph_transforms/summarize_graph', + '--in_graph=' + args.input_file + ], + stdout=subprocess.PIPE) while True: line = proc.stdout.readline().decode() if args.verbose: @@ -90,13 +92,15 @@ def SummarizeGraph(args): if __name__ == '__main__': parser = argparse.ArgumentParser() - parser.add_argument('--input_file', - required=True, - type=lambda s: CheckExt((['pb']), s), - help='pb file to read') - parser.add_argument('--tensorflow_path', - default='../../externals/tensorflow', - help='TensorFlow git repository path') + parser.add_argument( + '--input_file', + required=True, + type=lambda s: CheckExt((['pb']), s), + help='pb file to read') + parser.add_argument( + '--tensorflow_path', + default='../../externals/tensorflow', + help='TensorFlow git repository path') parser.add_argument('--verbose', action='store_true') args = parser.parse_args()