From d3b97074c96e67b2040f33851c76bc9d9918d65f Mon Sep 17 00:00:00 2001 From: Sourcery AI <> Date: Mon, 10 Oct 2022 01:33:00 +0000 Subject: [PATCH] 'Refactored by Sourcery' --- apps/android_camera/models/prepare_model.py | 16 ++-- apps/android_rpc/tests/android_rpc_test.py | 3 +- apps/benchmark/arm_cpu_imagenet_bench.py | 4 +- apps/benchmark/gpu_imagenet_bench.py | 7 +- apps/benchmark/mobile_gpu_imagenet_bench.py | 4 +- apps/benchmark/util.py | 2 +- apps/extension/python/tvm_ext/__init__.py | 6 +- apps/ios_rpc/init_proj.py | 11 +-- apps/ios_rpc/tests/ios_rpc_mobilenet.py | 12 +-- apps/ios_rpc/tests/ios_rpc_test.py | 8 +- .../template_project/microtvm_api_server.py | 39 ++++----- .../tests/test_arduino_microtvm_api_server.py | 4 +- apps/microtvm/cmsisnn/convert_image.py | 2 +- apps/microtvm/ethosu/convert_image.py | 2 +- apps/microtvm/ethosu/convert_labels.py | 2 +- apps/microtvm/reference-vm/base-box-tool.py | 87 +++++++++---------- .../template_project/microtvm_api_server.py | 54 +++++++----- .../zephyr_cmsisnn/model/convert_input.py | 2 +- .../zephyr_cmsisnn/model/convert_labels.py | 2 +- apps/pt_tvmdsoop/tests/test_as_torch.py | 10 +-- apps/pt_tvmdsoop/tests/test_optimize_torch.py | 35 ++++---- .../tests/test_torch_compile_cpu.py | 4 +- .../tests/test_torch_compile_gpu.py | 5 +- .../tests/test_torch_graph_module.py | 7 +- apps/pt_tvmdsoop/tests/test_torch_script.py | 12 +-- .../pt_tvmdsoop/tests/test_torch_vm_module.py | 7 +- apps/relax_examples/mlp.py | 3 +- .../broadcast/test_broadcast_map.py | 7 +- .../topi_recipe/conv/depthwise_conv2d_test.py | 23 +++-- apps/topi_recipe/conv/test_conv2d_hwcn_map.py | 9 +- apps/topi_recipe/conv/test_conv_int8_arm.py | 22 ++--- apps/topi_recipe/conv/test_conv_int8_intel.py | 20 ++--- apps/topi_recipe/gemm/android_gemm_square.py | 3 +- apps/topi_recipe/gemm/cuda_gemm_square.py | 4 +- apps/topi_recipe/reduce/test_reduce_map.py | 4 +- apps/topi_recipe/rnn/lstm.py | 7 +- apps/topi_recipe/rnn/matexp.py | 7 +- apps/uma/_template/passes.py | 2 +- apps/uma/uma_cli.py | 6 +- ci/jenkins/generate.py | 4 +- conda/render_cuda_dockerfiles.py | 4 +- conftest.py | 10 +-- docs/conf.py | 12 ++- docs/script_convert.py | 28 +++--- gallery/how_to/compile_models/from_keras.py | 5 +- gallery/how_to/compile_models/from_mxnet.py | 8 +- gallery/how_to/compile_models/from_oneflow.py | 10 ++- gallery/how_to/compile_models/from_pytorch.py | 10 ++- .../how_to/compile_models/from_tensorflow.py | 10 +-- gallery/how_to/compile_models/from_tflite.py | 6 +- .../deploy_models/deploy_model_on_android.py | 13 ++- .../deploy_models/deploy_model_on_nano.py | 8 +- .../deploy_models/deploy_model_on_rasp.py | 2 +- .../deploy_object_detection_pytorch.py | 2 +- .../deploy_prequantized_tflite.py | 15 ++-- gallery/how_to/deploy_models/deploy_sparse.py | 12 +-- .../extend_tvm/bring_your_own_datatypes.py | 21 ++--- .../extend_tvm/low_level_custom_pass.py | 27 +++--- .../how_to/extend_tvm/use_pass_instrument.py | 20 ++--- .../tune_network_arm.py | 2 +- .../tune_network_cuda.py | 2 +- .../tune_network_mali.py | 2 +- .../tune_network_x86.py | 2 +- .../tune_sparse_x86.py | 22 +++-- .../tune_with_autotvm/tune_relay_arm.py | 15 ++-- .../tune_with_autotvm/tune_relay_cuda.py | 15 ++-- .../tune_relay_mobile_gpu.py | 15 ++-- .../tune_with_autotvm/tune_relay_x86.py | 10 +-- .../how_to/work_with_microtvm/micro_tflite.py | 5 +- .../how_to/work_with_microtvm/micro_train.py | 4 +- gallery/how_to/work_with_relay/build_gcn.py | 45 +++++----- .../using_pipeline_executor.py | 9 +- 
.../how_to/work_with_schedules/extern_op.py | 2 +- .../how_to/work_with_schedules/intrin_math.py | 2 +- .../how_to/work_with_schedules/tensorize.py | 8 +- gallery/tutorial/autotvm_relay_x86.py | 13 +-- gallery/tutorial/cross_compilation_and_rpc.py | 7 +- gallery/tutorial/relay_quick_start.py | 5 +- gallery/tutorial/tensor_ir_blitz_course.py | 4 +- golang/sample/gen_mobilenet_lib.py | 2 +- jvm/core/src/test/scripts/test_add_gpu.py | 5 +- python/gen_requirements.py | 11 +-- python/setup.py | 41 ++++----- python/tvm/__init__.py | 3 +- python/tvm/_ffi/_ctypes/packed_func.py | 16 ++-- python/tvm/_ffi/_pyversion.py | 3 +- python/tvm/_ffi/base.py | 17 ++-- python/tvm/_ffi/libinfo.py | 35 ++++---- python/tvm/_ffi/registry.py | 20 ++--- python/tvm/_ffi/runtime_ctypes.py | 7 +- python/tvm/arith/analyzer.py | 2 +- python/tvm/auto_scheduler/compute_dag.py | 23 +++-- .../auto_scheduler/cost_model/xgb_model.py | 13 +-- python/tvm/auto_scheduler/dispatcher.py | 14 ++- python/tvm/auto_scheduler/feature.py | 4 +- python/tvm/auto_scheduler/loop_state.py | 9 +- python/tvm/auto_scheduler/measure.py | 54 ++++++------ python/tvm/auto_scheduler/measure_record.py | 5 +- .../tvm/auto_scheduler/relay_integration.py | 20 +++-- python/tvm/auto_scheduler/search_policy.py | 8 +- python/tvm/auto_scheduler/search_task.py | 52 +++++------ python/tvm/auto_scheduler/task_scheduler.py | 77 ++++++++-------- python/tvm/auto_scheduler/utils.py | 7 +- .../tvm/auto_scheduler/workload_registry.py | 11 ++- python/tvm/autotvm/database.py | 6 +- python/tvm/autotvm/feature.py | 15 +--- .../autotvm/graph_tuner/base_graph_tuner.py | 39 ++++----- .../graph_tuner/dynamic_programming_stage.py | 21 ++--- .../graph_tuner/dynamic_programming_tuner.py | 49 +++++------ python/tvm/autotvm/graph_tuner/pbqp_tuner.py | 28 +++--- .../graph_tuner/utils/traverse_graph.py | 22 +++-- python/tvm/autotvm/graph_tuner/utils/utils.py | 13 +-- python/tvm/autotvm/measure/measure.py | 8 +- python/tvm/autotvm/measure/measure_methods.py | 63 +++++++------- python/tvm/autotvm/record.py | 20 ++--- python/tvm/autotvm/task/dispatcher.py | 22 ++--- python/tvm/autotvm/task/space.py | 64 +++++++------- python/tvm/autotvm/task/task.py | 36 +++----- python/tvm/autotvm/task/topi_integration.py | 10 +-- python/tvm/autotvm/tophub.py | 16 ++-- python/tvm/autotvm/tuner/ga_tuner.py | 17 ++-- python/tvm/autotvm/tuner/model_based_tuner.py | 13 +-- python/tvm/autotvm/tuner/tuner.py | 5 +- .../tvm/autotvm/tuner/xgboost_cost_model.py | 38 +++----- python/tvm/contrib/cblas.py | 8 +- python/tvm/contrib/cc.py | 46 +++++----- python/tvm/contrib/clang.py | 6 +- python/tvm/contrib/cudnn.py | 33 +++---- python/tvm/contrib/cutlass/build.py | 46 ++++++---- .../tvm/contrib/cutlass/conv2d_operation.py | 33 +++---- python/tvm/contrib/cutlass/conv2d_profiler.py | 3 +- python/tvm/contrib/cutlass/gemm_operation.py | 26 +++--- python/tvm/contrib/cutlass/gemm_profiler.py | 3 +- python/tvm/contrib/cutlass/gen_conv2d.py | 15 +++- python/tvm/contrib/cutlass/gen_gemm.py | 7 +- python/tvm/contrib/cutlass/gen_tensor_op.py | 9 +- python/tvm/contrib/debugger/debug_executor.py | 9 +- python/tvm/contrib/debugger/debug_result.py | 19 ++-- python/tvm/contrib/download.py | 2 +- .../contrib/ethosu/cascader/device_config.py | 87 +++++++++---------- python/tvm/contrib/graph_executor.py | 13 +-- python/tvm/contrib/mkl.py | 8 +- python/tvm/contrib/mxnet.py | 2 +- python/tvm/contrib/ndk.py | 8 +- python/tvm/contrib/nvcc.py | 49 ++++------- python/tvm/contrib/peak.py | 4 +- python/tvm/contrib/pickle_memoize.py | 4 +- 
python/tvm/contrib/pipeline_executor.py | 31 +++---- python/tvm/contrib/pipeline_executor_build.py | 83 +++++++++--------- python/tvm/contrib/popen_pool.py | 53 ++++++----- python/tvm/contrib/rocm.py | 11 ++- python/tvm/contrib/sdaccel.py | 4 +- python/tvm/contrib/sparse.py | 23 +++-- python/tvm/contrib/tar.py | 2 +- python/tvm/contrib/tedd.py | 45 +++++----- python/tvm/contrib/thrust.py | 5 +- python/tvm/contrib/utils.py | 11 ++- python/tvm/contrib/xcode.py | 36 +++----- python/tvm/support.py | 11 +-- version.py | 20 +++-- 160 files changed, 1216 insertions(+), 1403 deletions(-) diff --git a/apps/android_camera/models/prepare_model.py b/apps/android_camera/models/prepare_model.py index 959e93f8b4..5361cf4c30 100644 --- a/apps/android_camera/models/prepare_model.py +++ b/apps/android_camera/models/prepare_model.py @@ -41,9 +41,9 @@ def del_dir(target: Union[Path, str], only_if_empty: bool = False): p.chmod(0o666) if p.is_dir(): p.rmdir() + elif only_if_empty: + raise RuntimeError(f"{p.parent} is not empty!") else: - if only_if_empty: - raise RuntimeError(f"{p.parent} is not empty!") p.unlink() target.rmdir() @@ -100,12 +100,12 @@ def main(model_str, output_path): with tvm.transform.PassContext(opt_level=3): graph, lib, params = relay.build(net, tvm.target.Target(target, target_host), params=params) print("dumping lib...") - lib.export_library(output_path_str + "/" + "deploy_lib_cpu.so", ndk.create_shared) + lib.export_library(f"{output_path_str}/deploy_lib_cpu.so", ndk.create_shared) print("dumping graph...") - with open(output_path_str + "/" + "deploy_graph.json", "w") as f: + with open(f"{output_path_str}/deploy_graph.json", "w") as f: f.write(graph) print("dumping params...") - with open(output_path_str + "/" + "deploy_param.params", "wb") as f: + with open(f"{output_path_str}/deploy_param.params", "wb") as f: f.write(tvm.runtime.save_param_dict(params)) print("dumping labels...") synset_url = "".join( @@ -116,11 +116,11 @@ def main(model_str, output_path): "imagenet1000_clsid_to_human.txt", ] ) - synset_path = output_path_str + "/image_net_labels" - download(synset_url, output_path_str + "/image_net_labels") + synset_path = f"{output_path_str}/image_net_labels" + download(synset_url, f"{output_path_str}/image_net_labels") with open(synset_path) as fi: synset = eval(fi.read()) - with open(output_path_str + "/image_net_labels.json", "w") as fo: + with open(f"{output_path_str}/image_net_labels.json", "w") as fo: json.dump(synset, fo, indent=4) os.remove(synset_path) diff --git a/apps/android_rpc/tests/android_rpc_test.py b/apps/android_rpc/tests/android_rpc_test.py index eac77c3133..c351c8c8f4 100644 --- a/apps/android_rpc/tests/android_rpc_test.py +++ b/apps/android_rpc/tests/android_rpc_test.py @@ -21,6 +21,7 @@ Use "android" as the key if you wish to avoid modifying this script. """ + import tvm from tvm import te import os @@ -36,7 +37,7 @@ # Change target configuration. # Run `adb shell cat /proc/cpuinfo` to find the arch. 
arch = "arm64" -target = "llvm -mtriple=%s-linux-android" % arch +target = f"llvm -mtriple={arch}-linux-android" # whether enable to execute test on OpenCL target test_opencl = False diff --git a/apps/benchmark/arm_cpu_imagenet_bench.py b/apps/benchmark/arm_cpu_imagenet_bench.py index dd89f0562b..77d659bd65 100644 --- a/apps/benchmark/arm_cpu_imagenet_bench.py +++ b/apps/benchmark/arm_cpu_imagenet_bench.py @@ -46,10 +46,10 @@ def evaluate_network(network, target, target_host, repeat): if "android" in str(target): from tvm.contrib import ndk - filename = "%s.so" % network + filename = f"{network}.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: - filename = "%s.tar" % network + filename = f"{network}.tar" lib.export_library(tmp.relpath(filename)) # upload library and params diff --git a/apps/benchmark/gpu_imagenet_bench.py b/apps/benchmark/gpu_imagenet_bench.py index 6407f766cb..f064755192 100644 --- a/apps/benchmark/gpu_imagenet_bench.py +++ b/apps/benchmark/gpu_imagenet_bench.py @@ -103,7 +103,10 @@ def benchmark(network, target): else: networks = [args.network] - target = tvm.target.Target("%s -device=%s -model=%s" % (args.target, args.device, args.model)) + target = tvm.target.Target( + f"{args.target} -device={args.device} -model={args.model}" + ) + print("--------------------------------------------------") print("%-20s %-20s" % ("Network Name", "Mean Inference Time (std dev)")) @@ -112,7 +115,7 @@ def benchmark(network, target): if args.thread == 1: benchmark(network, target) else: - threads = list() + threads = [] for n in range(args.thread): thread = threading.Thread( target=benchmark, args=([network, target]), name="thread%d" % n diff --git a/apps/benchmark/mobile_gpu_imagenet_bench.py b/apps/benchmark/mobile_gpu_imagenet_bench.py index 295b0c0eb8..218ef79a2e 100644 --- a/apps/benchmark/mobile_gpu_imagenet_bench.py +++ b/apps/benchmark/mobile_gpu_imagenet_bench.py @@ -46,10 +46,10 @@ def evaluate_network(network, target, target_host, dtype, repeat): if "android" in str(target) or "android" in str(target_host): from tvm.contrib import ndk - filename = "%s.so" % network + filename = f"{network}.so" lib.export_library(tmp.relpath(filename), ndk.create_shared) else: - filename = "%s.tar" % network + filename = f"{network}.tar" lib.export_library(tmp.relpath(filename)) # upload library and params diff --git a/apps/benchmark/util.py b/apps/benchmark/util.py index 01f0a11635..989f384930 100644 --- a/apps/benchmark/util.py +++ b/apps/benchmark/util.py @@ -84,7 +84,7 @@ def get_network(name, batch_size, dtype="float32"): ) net = tvm.IRModule.from_expr(net) else: - raise ValueError("Unsupported network: " + name) + raise ValueError(f"Unsupported network: {name}") return net, params, input_shape, output_shape diff --git a/apps/extension/python/tvm_ext/__init__.py b/apps/extension/python/tvm_ext/__init__.py index be1b42328c..fef4f96367 100644 --- a/apps/extension/python/tvm_ext/__init__.py +++ b/apps/extension/python/tvm_ext/__init__.py @@ -28,9 +28,9 @@ def load_lib(): """Load library, the functions will be registered into TVM""" curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) - # load in as global so the global extern symbol is visible to other dll. 
- lib = ctypes.CDLL(os.path.join(curr_path, "../../lib/libtvm_ext.so"), ctypes.RTLD_GLOBAL) - return lib + return ctypes.CDLL( + os.path.join(curr_path, "../../lib/libtvm_ext.so"), ctypes.RTLD_GLOBAL + ) _LIB = load_lib() diff --git a/apps/ios_rpc/init_proj.py b/apps/ios_rpc/init_proj.py index 9044a9e8cb..7b38f96153 100644 --- a/apps/ios_rpc/init_proj.py +++ b/apps/ios_rpc/init_proj.py @@ -48,12 +48,9 @@ team_id = args.team_id tvm_build_dir = args.tvm_build_dir -fi = open("tvmrpc.xcodeproj/project.pbxproj") -proj_config = fi.read() -fi.close() - +with open("tvmrpc.xcodeproj/project.pbxproj") as fi: + proj_config = fi.read() proj_config = proj_config.replace(default_team_id, team_id) proj_config = proj_config.replace(default_tvm_build_dir, tvm_build_dir) -fo = open("tvmrpc.xcodeproj/project.pbxproj", "w") -fo.write(proj_config) -fo.close() +with open("tvmrpc.xcodeproj/project.pbxproj", "w") as fo: + fo.write(proj_config) diff --git a/apps/ios_rpc/tests/ios_rpc_mobilenet.py b/apps/ios_rpc/tests/ios_rpc_mobilenet.py index b90b459280..4762d4799d 100644 --- a/apps/ios_rpc/tests/ios_rpc_mobilenet.py +++ b/apps/ios_rpc/tests/ios_rpc_mobilenet.py @@ -39,7 +39,7 @@ # sdk = "iphonesimulator" arch = "arm64" sdk = "iphoneos" -target_host = "llvm -mtriple=%s-apple-darwin" % arch +target_host = f"llvm -mtriple={arch}-apple-darwin" MODES = {"proxy": rpc.connect, "tracker": rpc.connect_tracker, "standalone": rpc.connect} @@ -106,10 +106,7 @@ def run(mod, target): remote = MODES[mode](host, port, key=key) remote.upload(path_dso) - if target == "metal": - dev = remote.metal(0) - else: - dev = remote.cpu(0) + dev = remote.metal(0) if target == "metal" else remote.cpu(0) lib = remote.load_module("deploy.dylib") m = graph_executor.GraphModule(lib["default"](dev)) @@ -174,11 +171,10 @@ def annotate(func, compiler): "--mode", type=str, default="tracker", - help="type of RPC connection (default: tracker), possible values: {}".format( - ", ".join(MODES.keys()) - ), + help=f'type of RPC connection (default: tracker), possible values: {", ".join(MODES.keys())}', ) + args = parser.parse_args() assert args.mode in MODES.keys() test_mobilenet(args.host, args.port, args.key, args.mode) diff --git a/apps/ios_rpc/tests/ios_rpc_test.py b/apps/ios_rpc/tests/ios_rpc_test.py index 94340dcd4e..baa838a53c 100644 --- a/apps/ios_rpc/tests/ios_rpc_test.py +++ b/apps/ios_rpc/tests/ios_rpc_test.py @@ -20,6 +20,7 @@ And configure the proxy host field as commented. 
""" + import tvm from tvm import te import os @@ -33,7 +34,7 @@ # Change target configuration, this is setting for iphone6s arch = "arm64" sdk = "iphoneos" -target = "llvm -mtriple=%s-apple-darwin" % arch +target = f"llvm -mtriple={arch}-apple-darwin" MODES = {"proxy": rpc.connect, "tracker": rpc.connect_tracker, "standalone": rpc.connect} @@ -105,11 +106,10 @@ def test_rpc_module(host, port, key, mode): "--mode", type=str, default="tracker", - help="type of RPC connection (default: tracker), possible values: {}".format( - ", ".join(MODES.keys()) - ), + help=f'type of RPC connection (default: tracker), possible values: {", ".join(MODES.keys())}', ) + args = parser.parse_args() assert args.mode in MODES.keys() test_rpc_module(args.host, args.port, args.key, args.mode) diff --git a/apps/microtvm/arduino/template_project/microtvm_api_server.py b/apps/microtvm/arduino/template_project/microtvm_api_server.py index 46b717fba4..fbca03af3e 100644 --- a/apps/microtvm/arduino/template_project/microtvm_api_server.py +++ b/apps/microtvm/arduino/template_project/microtvm_api_server.py @@ -57,8 +57,6 @@ class BoardAutodetectFailed(Exception): """Raised when no attached hardware is found matching the requested board""" - - PROJECT_TYPES = ["example_project", "host_driven"] PROJECT_OPTIONS = [ @@ -71,14 +69,14 @@ class BoardAutodetectFailed(Exception): ), server.ProjectOption( "arduino_cli_cmd", - required=( + required=None + if ARDUINO_CLI_CMD + else ["generate_project", "build", "flash", "open_transport"], + optional=( ["generate_project", "build", "flash", "open_transport"] - if not ARDUINO_CLI_CMD + if ARDUINO_CLI_CMD else None ), - optional=( - ["generate_project", "build", "flash", "open_transport"] if ARDUINO_CLI_CMD else None - ), default=ARDUINO_CLI_CMD, type="str", help="Path to the arduino-cli tool.", @@ -208,19 +206,18 @@ def _template_model_header(self, source_dir, metadata): with open(source_dir / "model.h", "r") as f: model_h_template = Template(f.read()) - all_module_names = [] - for name in metadata["modules"].keys(): - all_module_names.append(name) - + all_module_names = list(metadata["modules"].keys()) assert all( metadata["modules"][mod_name]["style"] == "full-model" for mod_name in all_module_names ), "when generating AOT, expect only full-model Model Library Format" - workspace_size_bytes = 0 - for mod_name in all_module_names: - workspace_size_bytes += metadata["modules"][mod_name]["memory"]["functions"]["main"][0][ + workspace_size_bytes = sum( + metadata["modules"][mod_name]["memory"]["functions"]["main"][0][ "workspace_size_bytes" ] + for mod_name in all_module_names + ) + template_values = { "workspace_size_bytes": workspace_size_bytes, } @@ -261,16 +258,16 @@ def _convert_includes(self, project_dir, source_dir): with filename.open("wb") as dst_file: for line in lines: line_str = str(line, "utf-8") - # Check if line has an include - result = re.search(r"#include\s*[<\"]([^>]*)[>\"]", line_str) - if not result: - dst_file.write(line) - else: + if result := re.search( + r"#include\s*[<\"]([^>]*)[>\"]", line_str + ): new_include = self._find_modified_include_path( project_dir, filename, result.groups()[0] ) updated_line = f'#include "{new_include}"\n' dst_file.write(updated_line.encode("utf-8")) + else: + dst_file.write(line) # Most of the files we used to be able to point to directly are under "src/standalone_crt/include/". 
# Howver, crt_config.h lives under "src/standalone_crt/crt_config/", and more exceptions might @@ -360,7 +357,7 @@ def _get_platform_version(self, arduino_cli_path: str) -> float: version_output = subprocess.run( [arduino_cli_path, "version"], check=True, stdout=subprocess.PIPE ).stdout.decode("utf-8") - str_version = re.search(r"Version: ([\.0-9]*)", version_output).group(1) + str_version = re.search(r"Version: ([\.0-9]*)", version_output)[1] # Using too low a version should raise an error. Note that naively # comparing floats will fail here: 0.7 > 0.21, but 0.21 is a higher @@ -428,7 +425,7 @@ def _parse_connected_boards(self, tabular_str): column_regex = r"\s*|".join(self.POSSIBLE_BOARD_LIST_HEADERS) + r"\s*" str_rows = tabular_str.split("\n") column_headers = list(re.finditer(column_regex, str_rows[0])) - assert len(column_headers) > 0 + assert column_headers for str_row in str_rows[1:]: if not str_row.strip(): diff --git a/apps/microtvm/arduino/template_project/tests/test_arduino_microtvm_api_server.py b/apps/microtvm/arduino/template_project/tests/test_arduino_microtvm_api_server.py index e74e3de55d..7a8ecfcea2 100644 --- a/apps/microtvm/arduino/template_project/tests/test_arduino_microtvm_api_server.py +++ b/apps/microtvm/arduino/template_project/tests/test_arduino_microtvm_api_server.py @@ -178,7 +178,7 @@ def test_flash(self, mock_run): # Test we checked version then called upload assert mock_run.call_count == 2 assert mock_run.call_args_list[0][0] == (["arduino-cli", "version"],) - assert mock_run.call_args_list[1][0][0][0:2] == ["arduino-cli", "upload"] + assert mock_run.call_args_list[1][0][0][:2] == ["arduino-cli", "upload"] mock_run.reset_mock() # Test exception raised when `arduino-cli upload` returns error code @@ -188,4 +188,4 @@ def test_flash(self, mock_run): # Version information should be cached and not checked again mock_run.assert_called_once() - assert mock_run.call_args[0][0][0:2] == ["arduino-cli", "upload"] + assert mock_run.call_args[0][0][:2] == ["arduino-cli", "upload"] diff --git a/apps/microtvm/cmsisnn/convert_image.py b/apps/microtvm/cmsisnn/convert_image.py index b7930ff73e..21fba76b25 100755 --- a/apps/microtvm/cmsisnn/convert_image.py +++ b/apps/microtvm/cmsisnn/convert_image.py @@ -27,7 +27,7 @@ def create_header_file(name, tensor_name, tensor_data, output_path): """ This function generates a header file containing the data from the numpy array provided. """ - file_path = pathlib.Path(f"{output_path}/" + name).resolve() + file_path = pathlib.Path(f"{output_path}/{name}").resolve() # Create header file with npy_data as a C array raw_path = file_path.with_suffix(".h").resolve() with open(raw_path, "w") as header_file: diff --git a/apps/microtvm/ethosu/convert_image.py b/apps/microtvm/ethosu/convert_image.py index 924d4bafde..a72233da52 100755 --- a/apps/microtvm/ethosu/convert_image.py +++ b/apps/microtvm/ethosu/convert_image.py @@ -27,7 +27,7 @@ def create_header_file(name, section, tensor_name, tensor_data, output_path): """ This function generates a header file containing the data from the numpy array provided. 
""" - file_path = pathlib.Path(f"{output_path}/" + name).resolve() + file_path = pathlib.Path(f"{output_path}/{name}").resolve() # Create header file with npy_data as a C array raw_path = file_path.with_suffix(".h").resolve() with open(raw_path, "w") as header_file: diff --git a/apps/microtvm/ethosu/convert_labels.py b/apps/microtvm/ethosu/convert_labels.py index 0b468b9e06..b2ef30fcf6 100755 --- a/apps/microtvm/ethosu/convert_labels.py +++ b/apps/microtvm/ethosu/convert_labels.py @@ -33,7 +33,7 @@ def create_labels_header(labels_file, section, output_path): with open(file_path, "w") as header_file: header_file.write(f'char* labels[] __attribute__((section("{section}"), aligned(16))) = {{') - for _, label in enumerate(labels): + for label in labels: header_file.write(f'"{label.rstrip()}",') header_file.write("};\n") diff --git a/apps/microtvm/reference-vm/base-box-tool.py b/apps/microtvm/reference-vm/base-box-tool.py index 325b9bc0c4..6e74d324ad 100755 --- a/apps/microtvm/reference-vm/base-box-tool.py +++ b/apps/microtvm/reference-vm/base-box-tool.py @@ -112,10 +112,8 @@ def parse_virtualbox_attached_usb_devices(vm_uuid): ) r = re.compile(VIRTUALBOX_USB_DEVICE_RE) - attached_usb_devices = r.findall(output, re.MULTILINE) - # List of couples (VendorId, ProductId) for all attached USB devices - return attached_usb_devices + return r.findall(output, re.MULTILINE) VIRTUALBOX_VID_PID_RE = re.compile(r"0x([0-9A-Fa-f]{4}).*") @@ -227,21 +225,20 @@ def attach_vmware(uuid, vid_hex=None, pid_hex=None, serial=None): def generate_packer_config(file_path, providers): - builders = [] provisioners = [] - for provider_name in providers: - builders.append( - { - "name": f"{provider_name}", - "type": "vagrant", - "box_name": f"microtvm-base-{provider_name}", - "output_dir": f"output-packer-{provider_name}", - "communicator": "ssh", - "source_path": "generic/ubuntu1804", - "provider": provider_name, - "template": "Vagrantfile.packer-template", - } - ) + builders = [ + { + "name": f"{provider_name}", + "type": "vagrant", + "box_name": f"microtvm-base-{provider_name}", + "output_dir": f"output-packer-{provider_name}", + "communicator": "ssh", + "source_path": "generic/ubuntu1804", + "provider": provider_name, + "template": "Vagrantfile.packer-template", + } + for provider_name in providers + ] repo_root = subprocess.check_output( ["git", "rev-parse", "--show-toplevel"], encoding="utf-8" @@ -253,17 +250,17 @@ def generate_packer_config(file_path, providers): filename = os.path.basename(script_path) provisioners.append({"type": "file", "source": script_path, "destination": f"~/{filename}"}) - provisioners.append( - { - "type": "shell", - "script": "base_box_setup.sh", - } - ) - provisioners.append( - { - "type": "shell", - "script": "base_box_provision.sh", - } + provisioners.extend( + ( + { + "type": "shell", + "script": "base_box_setup.sh", + }, + { + "type": "shell", + "script": "base_box_provision.sh", + }, + ) ) with open(file_path, "w") as f: @@ -355,9 +352,7 @@ def do_build_release_test_vm( if "config.vm.box_version" in line: continue m = VM_BOX_RE.match(line) - tvm_home_m = VM_TVM_HOME_RE.match(line) - - if tvm_home_m: + if tvm_home_m := VM_TVM_HOME_RE.match(line): # Adjust tvm home for testing step f.write(f'{tvm_home_m.group(1)} = "../../../.."\n') continue @@ -388,10 +383,16 @@ def do_build_release_test_vm( def do_run_release_test(release_test_dir, provider_name, test_config, test_device_serial): - with open( - os.path.join(release_test_dir, ".vagrant", "machines", "default", provider_name, "id") - ) 
as f: - machine_uuid = f.read() + machine_uuid = pathlib.Path( + os.path.join( + release_test_dir, + ".vagrant", + "machines", + "default", + provider_name, + "id", + ) + ).read_text() # Check if target is not QEMU if test_config["vid_hex"] and test_config["pid_hex"]: @@ -411,11 +412,12 @@ def _quote_cmd(cmd): + " && " + _quote_cmd( [ - f"apps/microtvm/reference-vm/base-box/base_box_test.sh", + "apps/microtvm/reference-vm/base-box/base_box_test.sh", test_config["microtvm_board"], ] ) ) + subprocess.check_call(["vagrant", "ssh", "-c", f"bash -ec '{test_cmd}'"], cwd=release_test_dir) @@ -440,7 +442,7 @@ def test_command(args): providers = args.provider - release_test_dir = THIS_DIR / f"release-test" + release_test_dir = THIS_DIR / "release-test" if args.skip_build or args.skip_destroy: assert ( @@ -484,11 +486,7 @@ def test_command(args): def release_command(args): - if args.release_full_name: - vm_name = args.release_full_name - else: - vm_name = "tlcpack/microtvm" - + vm_name = args.release_full_name or "tlcpack/microtvm" if not args.skip_creating_release_version: subprocess.check_call( [ @@ -501,7 +499,7 @@ def release_command(args): ] ) if not args.release_version: - sys.exit(f"--release-version must be specified") + sys.exit("--release-version must be specified") for provider_name in args.provider: subprocess.check_call( @@ -616,8 +614,7 @@ def parse_args(): ), ) - args = parser.parse_args() - return args + return parser.parse_args() def main(): diff --git a/apps/microtvm/zephyr/template_project/microtvm_api_server.py b/apps/microtvm/zephyr/template_project/microtvm_api_server.py index b6114f1098..b2eddbc770 100644 --- a/apps/microtvm/zephyr/template_project/microtvm_api_server.py +++ b/apps/microtvm/zephyr/template_project/microtvm_api_server.py @@ -67,7 +67,7 @@ # We only check two levels of the version. ZEPHYR_VERSION = 2.7 -WEST_CMD = default = sys.executable + " -m west" if sys.executable else None +WEST_CMD = default = f"{sys.executable} -m west" if sys.executable else None ZEPHYR_BASE = os.getenv("ZEPHYR_BASE") @@ -206,13 +206,14 @@ def generic_find_serial_port(serial_number=None): serial_ports = list(serial.tools.list_ports.grep(regex)) - if len(serial_ports) == 0: + if not serial_ports: raise Exception(f"No serial port found for board {prop['board']}!") if len(serial_ports) != 1: - ports_lst = "" - for port in serial_ports: - ports_lst += f"Serial port: {port.device}, serial number: {port.serial_number}\n" + ports_lst = "".join( + f"Serial port: {port.device}, serial number: {port.serial_number}\n" + for port in serial_ports + ) raise Exception("Expected 1 serial port, found multiple ports:\n {ports_lst}") @@ -244,18 +245,14 @@ def _get_nrf_device_args(options): return ["--snr", options["nrfjprog_snr"]] - if not boards: - return [] - - return ["--snr", boards[0]] + return ["--snr", boards[0]] if boards else [] PROJECT_TYPES = [] if IS_TEMPLATE: - for d in (API_SERVER_DIR / "src").iterdir(): - if d.is_dir(): - PROJECT_TYPES.append(d.name) - + PROJECT_TYPES.extend( + d.name for d in (API_SERVER_DIR / "src").iterdir() if d.is_dir() + ) PROJECT_OPTIONS = [ server.ProjectOption( @@ -266,7 +263,9 @@ def _get_nrf_device_args(options): ), server.ProjectOption( "gdbserver_port", - help=("If given, port number to use when running the local gdbserver."), + help=( + "If given, port number to use when running the local gdbserver." 
+ ), optional=["open_transport"], type="int", ), @@ -274,13 +273,17 @@ def _get_nrf_device_args(options): "nrfjprog_snr", optional=["open_transport"], type="int", - help=("When used with nRF targets, serial # of the attached board to use, from nrfjprog."), + help=( + "When used with nRF targets, serial # of the attached board to use, from nrfjprog." + ), ), server.ProjectOption( "openocd_serial", optional=["open_transport"], type="int", - help=("When used with OpenOCD targets, serial # of the attached board to use."), + help=( + "When used with OpenOCD targets, serial # of the attached board to use." + ), ), server.ProjectOption( "project_type", @@ -307,8 +310,14 @@ def _get_nrf_device_args(options): ), server.ProjectOption( "zephyr_base", - required=(["generate_project", "open_transport"] if not ZEPHYR_BASE else None), - optional=(["generate_project", "open_transport"] if ZEPHYR_BASE else ["build"]), + required=None + if ZEPHYR_BASE + else ["generate_project", "open_transport"], + optional=( + ["generate_project", "open_transport"] + if ZEPHYR_BASE + else ["build"] + ), default=ZEPHYR_BASE, type="str", help="Path to the zephyr base directory.", @@ -702,7 +711,7 @@ def _find_nrf_serial_port(cls, options): nrf_board = usb.core.find(idVendor=cls.NRF5340_VENDOR_ID) - if nrf_board == None: + if nrf_board is None: raise Exception("_find_nrf_serial_port: unable to find NRF5340DK") if nrf_board.idProduct in cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID: @@ -762,12 +771,11 @@ def close(self): def read(self, n, timeout_sec): self._port.timeout = timeout_sec - to_return = self._port.read(n) - if not to_return: + if to_return := self._port.read(n): + return to_return + else: raise server.IoTimeoutError() - return to_return - def write(self, data, timeout_sec): self._port.write_timeout = timeout_sec bytes_written = 0 diff --git a/apps/microtvm/zephyr_cmsisnn/model/convert_input.py b/apps/microtvm/zephyr_cmsisnn/model/convert_input.py index 7b10e86b0f..3ca2ed492a 100644 --- a/apps/microtvm/zephyr_cmsisnn/model/convert_input.py +++ b/apps/microtvm/zephyr_cmsisnn/model/convert_input.py @@ -25,7 +25,7 @@ def create_file(name, prefix, tensor_name, tensor_data, output_path): """ This function generates a header file containing the data from the numpy array provided. 
""" - file_path = pathlib.Path(f"{output_path}/" + name).resolve() + file_path = pathlib.Path(f"{output_path}/{name}").resolve() # Create header file with npy_data as a C array raw_path = file_path.with_suffix(".c").resolve() with open(raw_path, "w") as header_file: diff --git a/apps/microtvm/zephyr_cmsisnn/model/convert_labels.py b/apps/microtvm/zephyr_cmsisnn/model/convert_labels.py index d1bce6d798..99728ba3af 100644 --- a/apps/microtvm/zephyr_cmsisnn/model/convert_labels.py +++ b/apps/microtvm/zephyr_cmsisnn/model/convert_labels.py @@ -33,7 +33,7 @@ def create_labels_header(labels_file, output_path): with open(file_path, "w") as header_file: header_file.write(f"char* labels[] = {{") - for _, label in enumerate(labels): + for label in labels: header_file.write(f'"{label.rstrip()}",') header_file.write("};\n") diff --git a/apps/pt_tvmdsoop/tests/test_as_torch.py b/apps/pt_tvmdsoop/tests/test_as_torch.py index 2c454e9454..96acc69ecb 100644 --- a/apps/pt_tvmdsoop/tests/test_as_torch.py +++ b/apps/pt_tvmdsoop/tests/test_as_torch.py @@ -51,16 +51,16 @@ def main(a: T.handle, b: T.handle, c: T.handle) -> None: @tvm.script.ir_module class ModuleGPU: @T.prim_func - def main(A: T.Buffer[8, "float32"], B: T.Buffer[8, "float32"]) -> None: + def main(self, B: T.Buffer[8, "float32"]) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": True}) for i_0 in T.thread_binding(2, thread="blockIdx.x"): for i_2 in T.thread_binding(2, thread="threadIdx.x"): for i_1 in T.serial(2): with T.block("B"): vi = T.axis.spatial(8, i_0 * 4 + i_1 * 2 + i_2) - T.reads(A[vi]) + T.reads(self[vi]) T.writes(B[vi]) - B[vi] = A[vi] + T.float32(1) + B[vi] = self[vi] + T.float32(1) @as_torch @@ -96,11 +96,11 @@ def func_with_part_access_region(a: T.handle, b: T.handle, c: T.handle) -> None: @tvm.script.ir_module class MyModule: @T.prim_func - def main(a: T.handle, b: T.handle): + def main(self, b: T.handle): # We exchange data between function by handles, which are similar to pointer. T.func_attr({"global_symbol": "main", "tir.noalias": True}) # Create buffer from handles. - A = T.match_buffer(a, (8,), dtype="float32") + A = T.match_buffer(self, (8,), dtype="float32") B = T.match_buffer(b, (8,), dtype="float32") for i in range(8): # A block is an abstraction for computation. 
diff --git a/apps/pt_tvmdsoop/tests/test_optimize_torch.py b/apps/pt_tvmdsoop/tests/test_optimize_torch.py index 258dfe55c4..2a933ca4f3 100644 --- a/apps/pt_tvmdsoop/tests/test_optimize_torch.py +++ b/apps/pt_tvmdsoop/tests/test_optimize_torch.py @@ -131,24 +131,25 @@ def compare_optimize_resnet18_to_torchscript(): for i in range(20): test_input = torch.rand(1, 3, 224, 224).half().cuda() sub_label = f"[test {i}]" - results.append( - benchmark.Timer( - stmt="meta_module_resnet18(test_input)", - setup="from __main__ import meta_module_resnet18", - globals={"test_input": test_input}, - sub_label=sub_label, - description="tuning by meta", - ).blocked_autorange() - ) - results.append( - benchmark.Timer( - stmt="jit_module_resnet18(test_input)", - setup="from __main__ import jit_module_resnet18", - globals={"test_input": test_input}, - sub_label=sub_label, - description="tuning by jit", - ).blocked_autorange() + results.extend( + ( + benchmark.Timer( + stmt="meta_module_resnet18(test_input)", + setup="from __main__ import meta_module_resnet18", + globals={"test_input": test_input}, + sub_label=sub_label, + description="tuning by meta", + ).blocked_autorange(), + benchmark.Timer( + stmt="jit_module_resnet18(test_input)", + setup="from __main__ import jit_module_resnet18", + globals={"test_input": test_input}, + sub_label=sub_label, + description="tuning by jit", + ).blocked_autorange(), + ) ) + compare = benchmark.Compare(results) compare.print() diff --git a/apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py b/apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py index 5ad88b45dc..66274edc68 100644 --- a/apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py +++ b/apps/pt_tvmdsoop/tests/test_torch_compile_cpu.py @@ -37,7 +37,7 @@ def forward(self, x: torch.Tensor): print(model_jit.graph) print("run torchscript...") -for i in range(20): +for _ in range(20): t = time.time() model_jit(x) print(time.time() - t) @@ -61,7 +61,7 @@ def forward(self, x: torch.Tensor): print("Run PyTorch...") -for i in range(20): +for _ in range(20): t = time.time() outputs = pytorch_tvm_module.forward([x.cpu()]) print(1000 * (time.time() - t)) diff --git a/apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py b/apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py index b2ceb7f5cd..a1a496768b 100644 --- a/apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py +++ b/apps/pt_tvmdsoop/tests/test_torch_compile_gpu.py @@ -17,6 +17,7 @@ # specific language governing permissions and limitations # under the License. 
"""Test script for torch module""" + import torch import time from torchvision.models import resnet50 @@ -30,7 +31,7 @@ print(model_jit.graph) print("run torchscript...") -for i in range(20): +for _ in range(20): t = time.time() model_jit(x) torch.cuda.synchronize() @@ -55,7 +56,7 @@ print("Run PyTorch...") -for i in range(20): +for _ in range(20): t = time.time() outputs = pytorch_tvm_module.forward([x]) torch.cuda.synchronize() diff --git a/apps/pt_tvmdsoop/tests/test_torch_graph_module.py b/apps/pt_tvmdsoop/tests/test_torch_graph_module.py index 4e3b51227c..7d7b4d3379 100644 --- a/apps/pt_tvmdsoop/tests/test_torch_graph_module.py +++ b/apps/pt_tvmdsoop/tests/test_torch_graph_module.py @@ -87,10 +87,9 @@ def get_inputs_by_device(device): inps = [torch.Tensor(x), torch.Tensor(y)] if device == "cpu": return inps - else: - device_type, device_id = device.split(":") - assert device_type == "cuda" - return [inp.cuda(int(device_id)) for inp in inps] + device_type, device_id = device.split(":") + assert device_type == "cuda" + return [inp.cuda(int(device_id)) for inp in inps] assets = [os.path.join(export_dir, i) for i in TVM_ASSETS] engine.init((x.shape, y.shape), *assets) diff --git a/apps/pt_tvmdsoop/tests/test_torch_script.py b/apps/pt_tvmdsoop/tests/test_torch_script.py index 34b959714a..71f2503377 100644 --- a/apps/pt_tvmdsoop/tests/test_torch_script.py +++ b/apps/pt_tvmdsoop/tests/test_torch_script.py @@ -36,7 +36,7 @@ def forward(self, x, y): model.cuda().half() x = torch.rand([1280, 2464, 4]).cuda().half() y = torch.rand([1280, 4, 1]).cuda().half() -for i in range(20): +for _ in range(20): t = time.time() o = model(x, y) torch.cuda.synchronize() @@ -75,7 +75,7 @@ def forward(self, x, y): print("Run TVM...") tvm_x = tvm.nd.array(x.cpu().numpy().astype(dtype), device=tvm.gpu(0)) tvm_y = tvm.nd.array(y.cpu().numpy().astype(dtype), device=tvm.gpu(0)) -for i in range(20): +for _ in range(20): t = time.time() tvm_mod.run(x=tvm_x, y=tvm_y) print(1000 * (time.time() - t)) @@ -84,7 +84,7 @@ def forward(self, x, y): print("Run PyTorch...") -for i in range(20): +for _ in range(20): t = time.time() outputs = pytorch_mod.forward([x, y]) torch.cuda.synchronize() @@ -98,11 +98,7 @@ def __init__(self): self.layer = torch.jit.script(pytorch_mod) def forward(self, x, y, z) -> torch.Tensor: - if x > 1: - out = self.layer(y, z)[0] - else: - out = torch.ones([1280, 2464, 1]) - return out + return self.layer(y, z)[0] if x > 1 else torch.ones([1280, 2464, 1]) print("Exporting...") diff --git a/apps/pt_tvmdsoop/tests/test_torch_vm_module.py b/apps/pt_tvmdsoop/tests/test_torch_vm_module.py index 81d9dadb02..c26ed80356 100644 --- a/apps/pt_tvmdsoop/tests/test_torch_vm_module.py +++ b/apps/pt_tvmdsoop/tests/test_torch_vm_module.py @@ -80,10 +80,9 @@ def get_inputs_by_device(device): inps = [torch.Tensor(x), torch.Tensor(y)] if device == "cpu": return inps - else: - device_type, device_id = device.split(":") - assert device_type == "cuda" - return [inp.cuda(int(device_id)) for inp in inps] + device_type, device_id = device.split(":") + assert device_type == "cuda" + return [inp.cuda(int(device_id)) for inp in inps] assets = [os.path.join(export_dir, i) for i in TVM_ASSETS] engine.init((x.shape, y.shape), *assets) diff --git a/apps/relax_examples/mlp.py b/apps/relax_examples/mlp.py index fa69524a80..9edcc45a41 100644 --- a/apps/relax_examples/mlp.py +++ b/apps/relax_examples/mlp.py @@ -32,8 +32,7 @@ def build_mlp(data, weight): gv1 = bb.emit_te(topi.nn.relu, gv0) bb.emit_func_output(gv1) - mod = bb.get() - return 
mod + return bb.get() if __name__ == "__main__": diff --git a/apps/topi_recipe/broadcast/test_broadcast_map.py b/apps/topi_recipe/broadcast/test_broadcast_map.py index 4840a292d4..3bb41251ea 100644 --- a/apps/topi_recipe/broadcast/test_broadcast_map.py +++ b/apps/topi_recipe/broadcast/test_broadcast_map.py @@ -29,8 +29,7 @@ @tvm.register_func("tvm_callback_cuda_compile", override=True) def tvm_callback_cuda_compile(code): - ptx = nvcc.compile_cuda(code, target_format="ptx") - return ptx + return nvcc.compile_cuda(code, target_format="ptx") def write_code(code, fname): @@ -42,9 +41,9 @@ def write_code(code, fname): def tvm_callback_cuda_postproc(code): if not os.path.exists("perf"): os.mkdir("perf") - write_code(code, "perf/%s_generated.cu" % TASK) + write_code(code, f"perf/{TASK}_generated.cu") if USE_MANUAL_CODE: - code = open("perf/%s_manual.cu" % TASK).read() + code = open(f"perf/{TASK}_manual.cu").read() return code diff --git a/apps/topi_recipe/conv/depthwise_conv2d_test.py b/apps/topi_recipe/conv/depthwise_conv2d_test.py index 5ec205df52..b371fcfca9 100644 --- a/apps/topi_recipe/conv/depthwise_conv2d_test.py +++ b/apps/topi_recipe/conv/depthwise_conv2d_test.py @@ -34,8 +34,7 @@ @tvm.register_func("tvm_callback_cuda_compile", override=True) def tvm_callback_cuda_compile(code): - ptx = nvcc.compile_cuda(code, target_format="ptx") - return ptx + return nvcc.compile_cuda(code, target_format="ptx") def write_code(code, fname): @@ -47,9 +46,9 @@ def write_code(code, fname): def tvm_callback_cuda_postproc(code): if not os.path.exists("perf"): os.mkdir("perf") - write_code(code, "perf/%s_generated.cu" % TASK) + write_code(code, f"perf/{TASK}_generated.cu") if USE_MANUAL_CODE: - code = open("perf/%s_manual.cu" % TASK).read() + code = open(f"perf/{TASK}_manual.cu").read() return code @@ -93,7 +92,7 @@ def test_depthwise_conv2d_nchw(): def check_device(device): if not tvm.runtime.enabled(device): - print("Skip because %s is not enabled" % device) + print(f"Skip because {device} is not enabled") return dev = tvm.device(device, 0) # Build the kernel @@ -122,11 +121,11 @@ def check_device(device): # Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu) timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000) tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean - print("Input shape = " + str(get_const_tuple(Input.shape))) - print("Filter shape = " + str(get_const_tuple(Filter.shape))) + print(f"Input shape = {str(get_const_tuple(Input.shape))}") + print(f"Filter shape = {str(get_const_tuple(Filter.shape))}") print("Stride = (%d, %d)" % (stride_h, stride_w)) print("padding = %s\n" % padding) - print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape))) + print(f"Output shape = {str(get_const_tuple(DepthwiseConv2d.shape))}") print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6)) print( "average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" @@ -199,7 +198,7 @@ def test_depthwise_conv2d_nhwc(): def check_device(device): if not tvm.runtime.enabled(device): - print("Skip because %s is not enabled" % device) + print(f"Skip because {device} is not enabled") return dev = tvm.device(device, 0) # Build the kernel @@ -227,11 +226,11 @@ def check_device(device): # Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu) timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000) tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean - print("Input shape = " + 
str(get_const_tuple(Input.shape))) - print("Filter shape = " + str(get_const_tuple(Filter.shape))) + print(f"Input shape = {str(get_const_tuple(Input.shape))}") + print(f"Filter shape = {str(get_const_tuple(Filter.shape))}") print("Stride = (%d, %d)" % (stride_h, stride_w)) print("padding = %s\n" % padding) - print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape))) + print(f"Output shape = {str(get_const_tuple(DepthwiseConv2d.shape))}") print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6)) print( "average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us" diff --git a/apps/topi_recipe/conv/test_conv2d_hwcn_map.py b/apps/topi_recipe/conv/test_conv2d_hwcn_map.py index 6b239edb65..841c231e98 100644 --- a/apps/topi_recipe/conv/test_conv2d_hwcn_map.py +++ b/apps/topi_recipe/conv/test_conv2d_hwcn_map.py @@ -30,8 +30,7 @@ @tvm.register_func("tvm_callback_cuda_compile", override=True) def tvm_callback_cuda_compile(code): - ptx = nvcc.compile_cuda(code, target_format="ptx") - return ptx + return nvcc.compile_cuda(code, target_format="ptx") def write_code(code, fname): @@ -43,9 +42,9 @@ def write_code(code, fname): def tvm_callback_cuda_postproc(code): if not os.path.exists("perf"): os.mkdir("perf") - write_code(code, "perf/%s_generated.cu" % TASK) + write_code(code, f"perf/{TASK}_generated.cu") if USE_MANUAL_CODE: - code = open("perf/%s_manual.cu" % TASK).read() + code = open(f"perf/{TASK}_manual.cu").read() return code @@ -73,7 +72,7 @@ def test_conv2d_hwcn_map(): def check_device(device): if not tvm.runtime.enabled(device): - print("Skip because %s is not enabled" % device) + print(f"Skip because {device} is not enabled") return dev = tvm.device(device, 0) a = tvm.nd.array(a_np, dev) diff --git a/apps/topi_recipe/conv/test_conv_int8_arm.py b/apps/topi_recipe/conv/test_conv_int8_arm.py index ed2464140c..d86a40fb40 100644 --- a/apps/topi_recipe/conv/test_conv_int8_arm.py +++ b/apps/topi_recipe/conv/test_conv_int8_arm.py @@ -72,7 +72,7 @@ def get_shape( """ data_shape = (1, in_filter // NUM_VEC_LANES, im_height, im_width, NUM_VEC_LANES) - if out_dtype == "int32" or out_dtype == "uint32": + if out_dtype in ["int32", "uint32"]: kernel_shape = ( out_filter // NUM_VEC_LANES, in_filter // NUM_VEC_LANES, @@ -201,23 +201,15 @@ def run_inference( for i, wkl in enumerate(WORKLOADS): for dtype in ["uint", "int"]: fp32_time = run_inference("float32", "float32", "float32", *wkl) - int8_time = run_inference("%s8" % dtype, "%s8" % dtype, "%s32" % dtype, *wkl) + int8_time = run_inference(f"{dtype}8", f"{dtype}8", f"{dtype}32", *wkl) kernel_h = wkl[4] kernel_w = wkl[5] LOGGER.info( - "[%s] Workload#" % dtype - + str(i) - + ", " - + str(kernel_h) - + "x" - + str(kernel_w) - + ", " - + str(fp32_time) - + ", " - + str(int8_time) - + ", " - + str(fp32_time / int8_time) + f"[{dtype}] Workload#{str(i)}, {str(kernel_h)}x{str(kernel_w)}, {str(fp32_time)}, {str(int8_time)}, {str(fp32_time / int8_time)}" ) + SPEEDUP_ARRAY.append(fp32_time / int8_time) - LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))) + LOGGER.info( + f"Average speedup --> {str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))}" + ) diff --git a/apps/topi_recipe/conv/test_conv_int8_intel.py b/apps/topi_recipe/conv/test_conv_int8_intel.py index 36f8233559..7327f7058e 100644 --- a/apps/topi_recipe/conv/test_conv_int8_intel.py +++ b/apps/topi_recipe/conv/test_conv_int8_intel.py @@ -187,22 +187,14 @@ def run_inference( for i, wkl in enumerate(WORKLOADS): fp32_time 
= run_inference("float32", "float32", "float32", *wkl) int8_time = run_inference("uint8", "int8", "int32", *wkl) - kernel_h = wkl[4] kernel_w = wkl[5] + kernel_h = wkl[4] LOGGER.info( - "Workload#" - + str(i) - + ", " - + str(kernel_h) - + "x" - + str(kernel_w) - + ", " - + str(fp32_time) - + ", " - + str(int8_time) - + ", " - + str(fp32_time / int8_time) + f"Workload#{str(i)}, {str(kernel_h)}x{str(kernel_w)}, {str(fp32_time)}, {str(int8_time)}, {str(fp32_time / int8_time)}" ) + SPEEDUP_ARRAY.append(fp32_time / int8_time) - LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))) + LOGGER.info( + f"Average speedup --> {str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY)))}" + ) diff --git a/apps/topi_recipe/gemm/android_gemm_square.py b/apps/topi_recipe/gemm/android_gemm_square.py index 5f13d88707..faf5283c0c 100644 --- a/apps/topi_recipe/gemm/android_gemm_square.py +++ b/apps/topi_recipe/gemm/android_gemm_square.py @@ -15,6 +15,7 @@ # specific language governing permissions and limitations # under the License. """Example code to do square matrix multiplication on Android Phone.""" + import tvm from tvm import te import os @@ -30,7 +31,7 @@ # Change target configuration. # Run `adb shell cat /proc/cpuinfo` to find the arch. arch = "arm64" -target = "llvm -mtriple=%s-linux-android" % arch +target = f"llvm -mtriple={arch}-linux-android" def ngflops(N): diff --git a/apps/topi_recipe/gemm/cuda_gemm_square.py b/apps/topi_recipe/gemm/cuda_gemm_square.py index be55d158fc..ef094f59c5 100644 --- a/apps/topi_recipe/gemm/cuda_gemm_square.py +++ b/apps/topi_recipe/gemm/cuda_gemm_square.py @@ -102,9 +102,9 @@ def test_gemm(): def check_device(device): dev = tvm.device(device, 0) if not dev.exist: - print("Skip because %s is not enabled" % device) + print(f"Skip because {device} is not enabled") return - print("Device %s" % device) + print(f"Device {device}") f = tvm.build(s, [A, B, C], device) # launch the kernel. 
n, m, l = nn, nn, nn diff --git a/apps/topi_recipe/reduce/test_reduce_map.py b/apps/topi_recipe/reduce/test_reduce_map.py index f8d63e2d19..ca9574c06f 100644 --- a/apps/topi_recipe/reduce/test_reduce_map.py +++ b/apps/topi_recipe/reduce/test_reduce_map.py @@ -36,9 +36,9 @@ def write_code(code, fname): def tvm_callback_cuda_postproc(code): if not os.path.exists("perf"): os.mkdir("perf") - write_code(code, "perf/%s_generated.cu" % TASK) + write_code(code, f"perf/{TASK}_generated.cu") if USE_MANUAL_CODE: - code = open("perf/%s_manual.cu" % TASK).read() + code = open(f"perf/{TASK}_manual.cu").read() return code diff --git a/apps/topi_recipe/rnn/lstm.py b/apps/topi_recipe/rnn/lstm.py index bb9e31c5b2..a8d4cfa2f8 100644 --- a/apps/topi_recipe/rnn/lstm.py +++ b/apps/topi_recipe/rnn/lstm.py @@ -33,8 +33,7 @@ @tvm.register_func("tvm_callback_cuda_compile", override=True) def tvm_callback_cuda_compile(code): """Use nvcc compiler for better perf.""" - ptx = nvcc.compile_cuda(code, target_format="ptx") - return ptx + return nvcc.compile_cuda(code, target_format="ptx") def write_code(code, fname): @@ -46,9 +45,9 @@ def write_code(code, fname): def tvm_callback_cuda_postproc(code): if not os.path.exists("perf"): os.mkdir("perf") - write_code(code, "perf/%s_generated.cu" % TASK) + write_code(code, f"perf/{TASK}_generated.cu") if USE_MANUAL_CODE: - code = open("perf/%s_manual.cu" % TASK).read() + code = open(f"perf/{TASK}_manual.cu").read() return code diff --git a/apps/topi_recipe/rnn/matexp.py b/apps/topi_recipe/rnn/matexp.py index 303f0ed80d..6b415432d5 100644 --- a/apps/topi_recipe/rnn/matexp.py +++ b/apps/topi_recipe/rnn/matexp.py @@ -42,8 +42,7 @@ @tvm.register_func("tvm_callback_cuda_compile", override=True) def tvm_callback_cuda_compile(code): """Use nvcc compiler for better perf.""" - ptx = nvcc.compile_cuda(code, target_format="ptx") - return ptx + return nvcc.compile_cuda(code, target_format="ptx") def write_code(code, fname): @@ -55,9 +54,9 @@ def write_code(code, fname): def tvm_callback_cuda_postproc(code): if not os.path.exists("perf"): os.mkdir("perf") - write_code(code, "perf/%s_generated.cu" % TASK) + write_code(code, f"perf/{TASK}_generated.cu") if USE_MANUAL_CODE: - code = open("perf/%s_manual.cu" % TASK).read() + code = open(f"perf/{TASK}_manual.cu").read() return code diff --git a/apps/uma/_template/passes.py b/apps/uma/_template/passes.py index b4f261a5ab..459d9d6359 100644 --- a/apps/uma/_template/passes.py +++ b/apps/uma/_template/passes.py @@ -33,7 +33,7 @@ def transform_function( @classmethod def _my_ai_hw_conv2d_pass(cls, func, mod, ctx): - _loops = dict() + _loops = {} _handles = [] _entry_node = None diff --git a/apps/uma/uma_cli.py b/apps/uma/uma_cli.py index 159fa9e62c..8854550cfc 100644 --- a/apps/uma/uma_cli.py +++ b/apps/uma/uma_cli.py @@ -41,8 +41,7 @@ def _parse_args(): "--tutorial", type=str, ) - args = parser.parse_args() - return args + return parser.parse_args() def replace_template_name( @@ -52,8 +51,7 @@ def replace_template_name( Replace names in template skeleton code by new name """ for f in files: - with open(f) as read_file: - data = read_file.read() + data = pathlib.Path(f).read_text() for case in [underscore, camelize]: data = data.replace(case(template_name), case(add_hw_name)) data = data.replace(template_source, underscore(add_hw_name)) diff --git a/ci/jenkins/generate.py b/ci/jenkins/generate.py index 3d0198ba6f..975a2e8911 100644 --- a/ci/jenkins/generate.py +++ b/ci/jenkins/generate.py @@ -84,9 +84,7 @@ def lines_without_generated_tag(content): 
     parser.add_argument("--check", action="store_true", help="just verify the output didn't change")
     args = parser.parse_args()
 
-    with open(JENKINSFILE) as f:
-        content = f.read()
-
+    content = Path(JENKINSFILE).read_text()
     data["generated_time"] = datetime.datetime.now().isoformat()
 
     environment = jinja2.Environment(
diff --git a/conda/render_cuda_dockerfiles.py b/conda/render_cuda_dockerfiles.py
index d9d32f05fb..e2b186949a 100644
--- a/conda/render_cuda_dockerfiles.py
+++ b/conda/render_cuda_dockerfiles.py
@@ -53,8 +53,6 @@ def render_dockerfile(version):
 
 
 if __name__ == "__main__":
-    build_versions = CUDA_VERSIONS
-    if len(sys.argv) > 1:
-        build_versions = sys.argv[1:]
+    build_versions = sys.argv[1:] if len(sys.argv) > 1 else CUDA_VERSIONS
     for version in build_versions:
         render_dockerfile(version)
diff --git a/conftest.py b/conftest.py
index 0583dac201..17ed9703d7 100644
--- a/conftest.py
+++ b/conftest.py
@@ -52,10 +52,7 @@
     "tests/python/topi/python/test_topi_conv2d_winograd.py::test_conv2d_nchw",
     "tests/python/relay/test_py_converter.py::test_global_recursion",
 ]
-HARDCODED_ALLOCATIONS = {}
-for idx, test in enumerate(_slowest_tests):
-    HARDCODED_ALLOCATIONS[test] = idx
-
+HARDCODED_ALLOCATIONS = {test: idx for idx, test in enumerate(_slowest_tests)}
 # These rely on running on the same node to pass successfully
 FIXED_ALLOCATION_PREFIXES = {
     "tests/python/unittest/test_tvm_testing_features.py": 0,
@@ -84,7 +81,10 @@ def find_shard_index(nodeid: str, num_shards: int) -> int:
 
 
 def pytest_collection_modifyitems(config, items):
-    if not all(k in os.environ for k in ["CI", "TVM_NUM_SHARDS", "TVM_SHARD_INDEX"]):
+    if any(
+        k not in os.environ
+        for k in ["CI", "TVM_NUM_SHARDS", "TVM_SHARD_INDEX"]
+    ):
         # Only apportion tests if in CI and in a job that is set up for it
         return
diff --git a/docs/conf.py b/docs/conf.py
index d645958ca6..d406b05f86 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -60,7 +60,7 @@
 # General information about the project.
 project = "tvm"
 author = "Apache Software Foundation"
-copyright = "2020 - 2022, %s" % author
+copyright = f"2020 - 2022, {author}"
 github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
 
 os.environ["TVM_BUILD_DOC"] = "1"
@@ -73,7 +73,7 @@ def git_describe_version(original_version):
     exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
     _, gd_version = libver["git_describe_version"]()
     if gd_version != original_version:
-        print("Use git describe based version %s" % gd_version)
+        print(f"Use git describe based version {gd_version}")
     return gd_version
 
 
@@ -188,7 +188,7 @@ def git_describe_version(original_version):
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = project + "doc"
+htmlhelp_basename = f"{project}doc"
 
 # -- Options for LaTeX output ---------------------------------------------
 latex_elements = {}
@@ -196,9 +196,7 @@ def git_describe_version(original_version):
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-    (main_doc, "%s.tex" % project, project, author, "manual"),
-]
+latex_documents = [(main_doc, f"{project}.tex", project, author, "manual")]
 
 intersphinx_mapping = {
     "python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
@@ -471,7 +469,7 @@ def update_alias_docstring(name, obj, lines):
 
     if hasattr(sys.modules[amod], target_name):
         obj_type = ":py:func" if callable(obj) else ":py:class"
-        lines.append(".. rubric:: Alias of %s:`%s.%s`" % (obj_type, amod, target_name))
+        lines.append(f".. rubric:: Alias of {obj_type}:`{amod}.{target_name}`")
 
 
 def process_docstring(app, what, name, obj, options, lines):
diff --git a/docs/script_convert.py b/docs/script_convert.py
index edd173b295..26378b07e0 100644
--- a/docs/script_convert.py
+++ b/docs/script_convert.py
@@ -58,22 +58,20 @@ def bash_to_python(src_path: pathlib.Path, dest_path: pathlib.Path):
                 new_line_required = True
             else:
                 new_line_required = False
-            pass
+        elif line == BASH:
+            bash_detected = True
+        elif line == BASH_IGNORE:
+            bash_ignore_detected = True
+        elif line in [BASH_MULTILINE_COMMENT_START, BASH_MULTILINE_COMMENT_END]:
+            if new_line_required:
+                dest_f.write("\n")
+            dest_f.write('"""')
+            new_line_required = True
         else:
-            if line == BASH:
-                bash_detected = True
-            elif line == BASH_IGNORE:
-                bash_ignore_detected = True
-            elif line in [BASH_MULTILINE_COMMENT_START, BASH_MULTILINE_COMMENT_END]:
-                if new_line_required:
-                    dest_f.write("\n")
-                dest_f.write('"""')
-                new_line_required = True
-            else:
-                if new_line_required:
-                    dest_f.write("\n")
-                dest_f.write(f"{line}")
-                new_line_required = True
+            if new_line_required:
+                dest_f.write("\n")
+            dest_f.write(f"{line}")
+            new_line_required = True
 
         line = src_f.readline()
 
     if new_line_required:
diff --git a/gallery/how_to/compile_models/from_keras.py b/gallery/how_to/compile_models/from_keras.py
index 895a601ada..20c12e474f 100644
--- a/gallery/how_to/compile_models/from_keras.py
+++ b/gallery/how_to/compile_models/from_keras.py
@@ -35,6 +35,7 @@
     https://keras.io/#installation
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -136,8 +137,8 @@
 synset_path = download_testdata(synset_url, synset_name, module="data")
 with open(synset_path) as f:
     synset = eval(f.read())
-print("Relay top-1 id: {}, class name: {}".format(top1_tvm, synset[top1_tvm]))
+print(f"Relay top-1 id: {top1_tvm}, class name: {synset[top1_tvm]}")
 
 # confirm correctness with keras output
 keras_out = keras_resnet50.predict(data.transpose([0, 2, 3, 1]))
 top1_keras = np.argmax(keras_out)
-print("Keras top-1 id: {}, class name: {}".format(top1_keras, synset[top1_keras]))
+print(f"Keras top-1 id: {top1_keras}, class name: {synset[top1_keras]}")
diff --git a/gallery/how_to/compile_models/from_mxnet.py b/gallery/how_to/compile_models/from_mxnet.py
index 3808461862..ebee6da53c 100644
--- a/gallery/how_to/compile_models/from_mxnet.py
+++ b/gallery/how_to/compile_models/from_mxnet.py
@@ -132,10 +132,12 @@ def transform_image(image):
 def block2symbol(block):
     data = mx.sym.Variable("data")
     sym = block(data)
-    args = {}
     auxs = {}
-    for k, v in block.collect_params().items():
-        args[k] = mx.nd.array(v.data().asnumpy())
+    args = {
+        k: mx.nd.array(v.data().asnumpy())
+        for k, v in block.collect_params().items()
+    }
+
     return sym, args, auxs
diff --git a/gallery/how_to/compile_models/from_oneflow.py b/gallery/how_to/compile_models/from_oneflow.py
index eb27c4b3e3..2cb17ace9d 100644
--- a/gallery/how_to/compile_models/from_oneflow.py
+++ b/gallery/how_to/compile_models/from_oneflow.py
@@ -100,8 +100,7 @@
         self.m = module
 
     def build(self, x):
-        out = self.m(x)
-        return out
+        return self.m(x)
 
 
 graph = Graph(model)
@@ -177,7 +176,10 @@ def build(self, x):
 top_oneflow = np.argmax(output.numpy())
 oneflow_class_key = class_id_to_key[top_oneflow]
 
-print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
 print(
-    "OneFlow top-1 id: {}, class name: {}".format(top_oneflow, key_to_classname[oneflow_class_key])
+    f"Relay top-1 id: {top1_tvm}, class name: {key_to_classname[tvm_class_key]}"
+)
+
+print(
+    f"OneFlow top-1 id: {top_oneflow}, class name: {key_to_classname[oneflow_class_key]}"
 )
diff --git a/gallery/how_to/compile_models/from_pytorch.py b/gallery/how_to/compile_models/from_pytorch.py
index 98b531fa6d..aec4dd67f6 100644
--- a/gallery/how_to/compile_models/from_pytorch.py
+++ b/gallery/how_to/compile_models/from_pytorch.py
@@ -41,6 +41,7 @@
 be unstable.
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -173,5 +174,10 @@
 top1_torch = np.argmax(output.numpy())
 torch_class_key = class_id_to_key[top1_torch]
 
-print("Relay top-1 id: {}, class name: {}".format(top1_tvm, key_to_classname[tvm_class_key]))
-print("Torch top-1 id: {}, class name: {}".format(top1_torch, key_to_classname[torch_class_key]))
+print(
+    f"Relay top-1 id: {top1_tvm}, class name: {key_to_classname[tvm_class_key]}"
+)
+
+print(
+    f"Torch top-1 id: {top1_torch}, class name: {key_to_classname[torch_class_key]}"
+)
diff --git a/gallery/how_to/compile_models/from_tensorflow.py b/gallery/how_to/compile_models/from_tensorflow.py
index 9a32397815..6bf2a497be 100644
--- a/gallery/how_to/compile_models/from_tensorflow.py
+++ b/gallery/how_to/compile_models/from_tensorflow.py
@@ -24,6 +24,7 @@
 Please refer to https://www.tensorflow.org/install
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -43,18 +44,13 @@
 import tensorflow as tf
 
 
-# Ask tensorflow to limit its GPU memory to what's actually needed
-# instead of gobbling everything that's available.
-# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
-# This way this tutorial is a little more friendly to sphinx-gallery.
-gpus = tf.config.list_physical_devices("GPU")
-if gpus:
+if gpus := tf.config.list_physical_devices("GPU"):
     try:
         for gpu in gpus:
             tf.config.experimental.set_memory_growth(gpu, True)
         print("tensorflow will use experimental.set_memory_growth(True)")
     except RuntimeError as e:
-        print("experimental.set_memory_growth option is not available: {}".format(e))
+        print(f"experimental.set_memory_growth option is not available: {e}")
 
 
 try:
diff --git a/gallery/how_to/compile_models/from_tflite.py b/gallery/how_to/compile_models/from_tflite.py
index 712269381f..e9a745b4a4 100644
--- a/gallery/how_to/compile_models/from_tflite.py
+++ b/gallery/how_to/compile_models/from_tflite.py
@@ -73,7 +73,7 @@ def extract(path):
         tar.extractall(path=dir_path)
         tar.close()
     else:
-        raise RuntimeError("Could not decompress the file: " + path)
+        raise RuntimeError(f"Could not decompress the file: {path}")
 
 
 ######################################################################
@@ -195,4 +195,6 @@ def extract(path):
 prediction = np.argmax(predictions)
 
 # Convert id to class name and show the result
-print("The image prediction result is: id " + str(prediction) + " name: " + labels[prediction])
+print(
+    f"The image prediction result is: id {prediction} name: {labels[prediction]}"
+)
diff --git a/gallery/how_to/deploy_models/deploy_model_on_android.py b/gallery/how_to/deploy_models/deploy_model_on_android.py
index 10e108239e..a5940b67f3 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_android.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_android.py
@@ -263,7 +263,7 @@ def transform_image(image):
 # Change target configuration.
 # Run `adb shell cat /proc/cpuinfo` to find the arch.
 arch = "arm64"
-target = tvm.target.Target("llvm -mtriple=%s-linux-android" % arch)
+target = tvm.target.Target(f"llvm -mtriple={arch}-linux-android")
 
 if local_demo:
     target = tvm.target.Target("llvm")
@@ -286,7 +286,7 @@ def transform_image(image):
 # Save the library at local temporary directory.
 tmp = utils.tempdir()
 lib_fname = tmp.relpath("net.so")
-fcompile = ndk.create_shared if not local_demo else None
+fcompile = None if local_demo else ndk.create_shared
 lib.export_library(lib_fname, fcompile)
 
 ######################################################################
@@ -306,15 +306,12 @@ def transform_image(image):
 # When running a heavy model, we should increase the `session_timeout`
 remote = tracker.request(key, priority=0, session_timeout=60)
 
-if local_demo:
+if local_demo or test_target not in ["opencl", "vulkan"]:
     dev = remote.cpu(0)
 elif test_target == "opencl":
     dev = remote.cl(0)
-elif test_target == "vulkan":
-    dev = remote.vulkan(0)
 else:
-    dev = remote.cpu(0)
-
+    dev = remote.vulkan(0)
 # upload the library to remote device and load it
 remote.upload(lib_fname)
 rlib = remote.load_module("net.so")
@@ -335,7 +332,7 @@ def transform_image(image):
 
 # get top1 result
 top1 = np.argmax(out.numpy())
-print("TVM prediction top-1: {}".format(synset[top1]))
+print(f"TVM prediction top-1: {synset[top1]}")
 
 print("Evaluate inference time cost...")
 print(module.benchmark(dev, number=1, repeat=10))
diff --git a/gallery/how_to/deploy_models/deploy_model_on_nano.py b/gallery/how_to/deploy_models/deploy_model_on_nano.py
index 5e59dccf20..ecc4fe69ec 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_nano.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_nano.py
@@ -231,11 +231,7 @@ def transform_image(image):
 rlib = remote.load_module("net.tar")
 
 # create the remote runtime module
-if local_demo:
-    dev = remote.cpu(0)
-else:
-    dev = remote.cuda(0)
-
+dev = remote.cpu(0) if local_demo else remote.cuda(0)
 module = runtime.GraphModule(rlib["default"](dev))
 # set input data
 module.set_input("data", tvm.nd.array(x.astype("float32")))
@@ -245,4 +241,4 @@ def transform_image(image):
 out = module.get_output(0)
 # get top1 result
 top1 = np.argmax(out.numpy())
-print("TVM prediction top-1: {}".format(synset[top1]))
+print(f"TVM prediction top-1: {synset[top1]}")
diff --git a/gallery/how_to/deploy_models/deploy_model_on_rasp.py b/gallery/how_to/deploy_models/deploy_model_on_rasp.py
index ab5374d93d..829e78d178 100644
--- a/gallery/how_to/deploy_models/deploy_model_on_rasp.py
+++ b/gallery/how_to/deploy_models/deploy_model_on_rasp.py
@@ -233,4 +233,4 @@ def transform_image(image):
 out = module.get_output(0)
 # get top1 result
 top1 = np.argmax(out.numpy())
-print("TVM prediction top-1: {}".format(synset[top1]))
+print(f"TVM prediction top-1: {synset[top1]}")
diff --git a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
index 0d8d0f2867..fcfff4507b 100644
--- a/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
+++ b/gallery/how_to/deploy_models/deploy_object_detection_pytorch.py
@@ -156,4 +156,4 @@ def forward(self, inp):
     else:
         break
 
-print("Get {} valid boxes".format(len(valid_boxes)))
+print(f"Get {len(valid_boxes)} valid boxes")
diff --git a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
index 494b4a9e21..eb5eba1855 100644
--- a/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
+++ b/gallery/how_to/deploy_models/deploy_prequantized_tflite.py
@@ -91,7 +91,7 @@ def extract(path):
         tar.extractall(path=dir_path)
         tar.close()
     else:
-        raise RuntimeError("Could not decompress the file: " + path)
+        raise RuntimeError(f"Could not decompress the file: {path}")
 
 
 extract(model_path)
@@ -113,8 +113,7 @@ def get_real_image(im_height, im_width):
     img_path = download_testdata(image_url, img_name, module="data")
     image = Image.open(img_path).resize((im_height, im_width))
     x = np.array(image).astype("uint8")
-    data = np.reshape(x, (1, im_height, im_width, 3))
-    return data
+    return np.reshape(x, (1, im_height, im_width, 3))
 
 
 data = get_real_image(224, 224)
@@ -163,12 +162,10 @@ def run_tflite_model(tflite_model_buf, input_data):
     # Run
     interpreter.invoke()
 
-    # get output
-    tflite_output = list()
-    for i in range(len(output_details)):
-        tflite_output.append(interpreter.get_tensor(output_details[i]["index"]))
-
-    return tflite_output
+    return [
+        interpreter.get_tensor(output_details[i]["index"])
+        for i in range(len(output_details))
+    ]
 
 
 ###############################################################################
diff --git a/gallery/how_to/deploy_models/deploy_sparse.py b/gallery/how_to/deploy_models/deploy_sparse.py
index b9a26e0d30..e4960ee7be 100644
--- a/gallery/how_to/deploy_models/deploy_sparse.py
+++ b/gallery/how_to/deploy_models/deploy_sparse.py
@@ -70,6 +70,7 @@
 sparse speed using fake weights to see the benefit of structured sparsity.
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -96,18 +97,13 @@
 import scipy.sparse as sp
 
 
-# Ask tensorflow to limit its GPU memory to what's actually needed
-# instead of gobbling everything that's available.
-# https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth
-# This way this tutorial is a little more friendly to sphinx-gallery.
-gpus = tf.config.list_physical_devices("GPU")
-if gpus:
+if gpus := tf.config.list_physical_devices("GPU"):
     try:
         for gpu in gpus:
             tf.config.experimental.set_memory_growth(gpu, True)
         print("tensorflow will use experimental.set_memory_growth(True)")
     except RuntimeError as e:
-        print("experimental.set_memory_growth option is not available: {}".format(e))
+        print(f"experimental.set_memory_growth option is not available: {e}")
 
 
 ###############################################################################
@@ -155,7 +151,7 @@ def load_keras_model(module, name, seq_len, batch_size, report_runtime=True):
         )
         start = time.time()
         repeats = 50
-        for i in range(repeats):
+        for _ in range(repeats):
             np_out = model(np_input)
         end = time.time()
         print("Keras Runtime: %f ms." % (1000 * ((end - start) / repeats)))
diff --git a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
index 479269a224..2144fafbb3 100644
--- a/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
+++ b/gallery/how_to/extend_tvm/bring_your_own_datatypes.py
@@ -52,6 +52,7 @@
     ctypes.CDLL('my-datatype-lib.so', ctypes.RTLD_GLOBAL)
 
 """
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -82,14 +83,14 @@
 x_input = np.random.rand(3).astype("float32")
 y_input = np.random.rand(3).astype("float32")
-print("x: {}".format(x_input))
-print("y: {}".format(y_input))
+print(f"x: {x_input}")
+print(f"y: {y_input}")
 
 ######################################################################
 # Finally, we're ready to run the program:
 
 z_output = relay.create_executor(mod=module).evaluate()(x_input, y_input)
-print("z: {}".format(z_output))
+print(f"z: {z_output}")
 
 ######################################################################
 # Adding Custom Datatypes
@@ -140,7 +141,7 @@
 try:
     with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
         z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
-    print("z: {}".format(y_myfloat))
+    print(f"z: {z_output_myfloat}")
 except tvm.TVMError as e:
     # Print last line of error
     print(str(e).split("\n")[-1])
@@ -185,7 +186,7 @@
 try:
     with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
         z_output_myfloat = relay.create_executor("graph", mod=module).evaluate()(x_input, y_input)
-    print("z: {}".format(z_output_myfloat))
+    print(f"z: {z_output_myfloat}")
 except tvm.TVMError as e:
     # Print last line of error
     print(str(e).split("\n")[-1])
@@ -214,12 +215,12 @@
 # Now, we can run our program without errors.
 with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
     z_output_myfloat = relay.create_executor(mod=module).evaluate()(x_input, y_input)
-print("z: {}".format(z_output_myfloat))
+print(f"z: {z_output_myfloat}")
 
-print("x:\t\t{}".format(x_input))
-print("y:\t\t{}".format(y_input))
-print("z (float32):\t{}".format(z_output))
-print("z (myfloat32):\t{}".format(z_output_myfloat))
+print(f"x:\t\t{x_input}")
+print(f"y:\t\t{y_input}")
+print(f"z (float32):\t{z_output}")
+print(f"z (myfloat32):\t{z_output_myfloat}")
 
 # Perhaps as expected, the ``myfloat32`` results and ``float32`` are exactly the same!
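The hunks in this patch repeat a handful of mechanical rewrites: percent- and `.format`-style interpolation becomes f-strings, assign-then-test becomes the walrus operator, if/else assignments collapse into conditional expressions, and append loops become comprehensions. The following standalone sketch spells out the before/after equivalences; the names (`load`, `records`, `threshold`, `verbose`) are illustrative only and are not taken from the TVM sources. One caveat: `:=` requires Python 3.8+, while `python/tvm/_ffi/_pyversion.py` (also touched below) still enforces only a 3.6 minimum.

# Before/after pairs for the rewrites applied throughout this patch.
# All names here are illustrative; only the standard library is used.

def load():
    """Stand-in for any call whose result is tested right after assignment."""
    return [3, 1, 4, 1, 5]

name, count = "resnet-18", 5

# 1. %-interpolation / str.format -> f-string: identical output.
assert "%s.%s.log" % (name, count) == f"{name}.{count}.log"

# 2. Assign-then-test -> walrus operator (Python 3.8+).
records = load()
if records:
    first_old = records[0]
if records := load():
    first_new = records[0]
assert first_old == first_new

# 3. if/else assignment -> conditional expression.
verbose = True
if verbose:
    level_old = "debug"
else:
    level_old = "info"
level_new = "debug" if verbose else "info"
assert level_old == level_new

# 4. Append loop -> comprehension.
threshold = 2
big_old = []
for r in records:
    if r > threshold:
        big_old.append(r)
big_new = [r for r in records if r > threshold]
assert big_old == big_new

These rewrites are intended to be behavior-preserving; the only observable change is the minimum interpreter version implied by the walrus operator.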
diff --git a/gallery/how_to/extend_tvm/low_level_custom_pass.py b/gallery/how_to/extend_tvm/low_level_custom_pass.py
index 0f99c72cee..850656a006 100644
--- a/gallery/how_to/extend_tvm/low_level_custom_pass.py
+++ b/gallery/how_to/extend_tvm/low_level_custom_pass.py
@@ -93,10 +93,12 @@
 
 def find_width8(op):
     """Find all the 'tir.For' nodes whose extent can be divided by 8."""
-    if isinstance(op, tvm.tir.For):
-        if isinstance(op.extent, tvm.tir.IntImm):
-            if op.extent.value % 8 == 0:
-                loops.append(op)
+    if (
+        isinstance(op, tvm.tir.For)
+        and isinstance(op.extent, tvm.tir.IntImm)
+        and op.extent.value % 8 == 0
+    ):
+        loops.append(op)
 
 
 #####################################################################
@@ -120,7 +122,7 @@ def vectorize8(op):
     if op in loops:
         extent = op.extent.value
         name = op.loop_var.name
-        lo, li = te.var(name + ".outer"), te.var(name + ".inner")
+        lo, li = te.var(f"{name}.outer"), te.var(f"{name}.inner")
         body = tvm.tir.stmt_functor.substitute(op.body, {op.loop_var: lo * 8 + li})
         body = tvm.tir.For(li, 0, 8, tvm.tir.ForKind.VECTORIZED, body)
         body = tvm.tir.For(lo, 0, extent // 8, tvm.tir.ForKind.SERIAL, body)
@@ -134,12 +136,15 @@ def vectorize(f, mod, ctx):
 
     tvm.tir.stmt_functor.post_order_visit(f.body, find_width8)
 
-    if not loops:
-        return f
-
-    # The last list arugment indicates what kinds of nodes will be transformed.
-    # Thus, in this case only `For` nodes will call `vectorize8`
-    return f.with_body(tvm.tir.stmt_functor.ir_transform(f.body, None, vectorize8, ["tir.For"]))
+    return (
+        f.with_body(
+            tvm.tir.stmt_functor.ir_transform(
+                f.body, None, vectorize8, ["tir.For"]
+            )
+        )
+        if loops
+        else f
+    )
 
 
 #####################################################################
diff --git a/gallery/how_to/extend_tvm/use_pass_instrument.py b/gallery/how_to/extend_tvm/use_pass_instrument.py
index 3079e2f0e7..b94bd351aa 100644
--- a/gallery/how_to/extend_tvm/use_pass_instrument.py
+++ b/gallery/how_to/extend_tvm/use_pass_instrument.py
@@ -140,14 +140,13 @@ def run_before_pass(self, mod, info):
     def run_after_pass(self, mod, info):
         # Pop out the latest recorded pass.
         name_before, op_to_cnt_before = self._op_cnt_before_stack.pop()
-        assert name_before == info.name, "name_before: {}, info.name: {} doesn't match".format(
-            name_before, info.name
-        )
+        assert (
+            name_before == info.name
+        ), f"name_before: {name_before}, info.name: {info.name} doesn't match"
+
         cur_depth = len(self._op_cnt_before_stack)
         op_to_cnt_after = self._count_nodes(mod)
-        op_diff = self._diff(op_to_cnt_after, op_to_cnt_before)
-        # only record passes causing differences.
-        if op_diff:
+        if op_diff := self._diff(op_to_cnt_after, op_to_cnt_before):
             self._op_diff.append((cur_depth, info.name, op_diff))
 
     def get_pass_to_op_diff(self):
@@ -183,8 +182,7 @@ def _diff(d_after, d_before):
     ret = {}
     key_after, key_before = set(d_after), set(d_before)
     for k in key_before & key_after:
-        tmp = d_after[k] - d_before[k]
-        if tmp:
+        if tmp := d_after[k] - d_before[k]:
             ret[k] = d_after[k] - d_before[k]
     for k in key_after - key_before:
         ret[k] = d_after[k]
@@ -267,14 +265,14 @@ class PassFine(PassExampleBase):
 class PassBadEnterCtx(PassExampleBase):
     def enter_pass_ctx(self):
         print(self._name, "bad enter_pass_ctx!!!")
-        raise ValueError("{} bad enter_pass_ctx".format(self._name))
+        raise ValueError(f"{self._name} bad enter_pass_ctx")
 
 
 @pass_instrument
 class PassBadExitCtx(PassExampleBase):
     def exit_pass_ctx(self):
         print(self._name, "bad exit_pass_ctx!!!")
-        raise ValueError("{} bad exit_pass_ctx".format(self._name))
+        raise ValueError(f"{self._name} bad exit_pass_ctx")
 
 
 ###############################################################################
@@ -329,7 +327,7 @@ def exit_pass_ctx(self):
 class PassBadRunBefore(PassExampleBase):
     def run_before_pass(self, mod, pass_info):
         print(self._name, "bad run_before_pass!!!")
-        raise ValueError("{} bad run_before_pass".format(self._name))
+        raise ValueError(f"{self._name} bad run_before_pass")
 
 
 demo_ctx = tvm.transform.PassContext(
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
index 09a1d0cea5..fdca93b4ce 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_arm.py
@@ -86,7 +86,7 @@ def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=Fal
         elif layout == "NCHW":
             image_shape = (3, 224, 224)
         else:
-            raise ValueError("Invalid layout: " + layout)
+            raise ValueError(f"Invalid layout: {layout}")
 
         input_shape = (batch_size,) + image_shape
         output_shape = (batch_size, 1000)
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py b/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
index cc29f27ba2..4ceccd7897 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_cuda.py
@@ -81,7 +81,7 @@ def get_network(name, batch_size, layout="NHWC", dtype="float32"):
         elif layout == "NCHW":
             image_shape = (3, 224, 224)
         else:
-            raise ValueError("Invalid layout: " + layout)
+            raise ValueError(f"Invalid layout: {layout}")
 
         input_shape = (batch_size,) + image_shape
         output_shape = (batch_size, 1000)
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
index 8ac0b235d7..a67d53b770 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_mali.py
@@ -82,7 +82,7 @@ def get_network(name, batch_size, layout="NHWC", dtype="float32"):
         elif layout == "NCHW":
             image_shape = (3, 224, 224)
         else:
-            raise ValueError("Invalid layout: " + layout)
+            raise ValueError(f"Invalid layout: {layout}")
 
         input_shape = (batch_size,) + image_shape
         output_shape = (batch_size, 1000)
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py b/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
index 5a321104c8..7f4a393789 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_network_x86.py
@@ -83,7 +83,7 @@ def get_network(name, batch_size, layout="NHWC", dtype="float32", use_sparse=Fal
         elif layout == "NCHW":
             image_shape = (3, 224, 224)
         else:
-            raise ValueError("Invalid layout: " + layout)
+            raise ValueError(f"Invalid layout: {layout}")
 
         input_shape = (batch_size,) + image_shape
         output_shape = (batch_size, 1000)
diff --git a/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py b/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
index 0a2ddbd1bd..32e4a6dd4c 100644
--- a/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
+++ b/gallery/how_to/tune_with_autoscheduler/tune_sparse_x86.py
@@ -125,16 +125,25 @@ def sparse_dense(M, N, K, w_data_shape, w_indices_shape, w_indptr_shape, dtype):
 )
 task = tvm.auto_scheduler.SearchTask(
     func=sparse_dense,
-    args=(M, N, K, W_sp_np.data.shape, W_sp_np.indices.shape, W_sp_np.indptr.shape, "float32"),
+    args=(
+        M,
+        N,
+        K,
+        W_sp_np.data.shape,
+        W_sp_np.indices.shape,
+        W_sp_np.indptr.shape,
+        "float32",
+    ),
     target=target,
     task_inputs={
-        prefix + "W_data": runtime.ndarray.array(W_sp_np.data),
-        prefix + "W_indices": runtime.ndarray.array(W_sp_np.indices),
-        prefix + "W_indptr": runtime.ndarray.array(W_sp_np.indptr),
+        f"{prefix}W_data": runtime.ndarray.array(W_sp_np.data),
+        f"{prefix}W_indices": runtime.ndarray.array(W_sp_np.indices),
+        f"{prefix}W_indptr": runtime.ndarray.array(W_sp_np.indptr),
     },
     task_inputs_save_to_file=True,
 )
 
+
 # Inspect the computational graph
 print("Computational DAG:")
 print(task.compute_dag)
@@ -164,7 +173,6 @@ def meet_condition_func(search_policy, state, stage_id):
 
 
 def apply_func(search_policy, state, stage_id):
-    ret = []
     s0 = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
     if s0.stages[stage_id].op.tag == "sparse_dense_sp_rhs_bsrmm_block":
         return [s0.state_object, stage_id - 1]
@@ -200,9 +208,7 @@ def apply_func(search_policy, state, stage_id):
     s0.reorder(consumer, [m0, n0, m1, n1])
     s0.compute_at(sparse_dense_block, consumer, n0)
 
-    ret.append([s0.state_object, stage_id - 2])
-
-    return ret
+    return [[s0.state_object, stage_id - 2]]
 
 
 ######################################################################
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
index ab278021d2..a19a8b2c3d 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_arm.py
@@ -122,7 +122,7 @@ def get_network(name, batch_size):
         )
         mod = tvm.IRModule.from_expr(net)
     else:
-        raise ValueError("Unsupported network: " + name)
+        raise ValueError(f"Unsupported network: {name}")
 
     return mod, params, input_shape, output_shape
 
@@ -218,7 +218,7 @@ def get_network(name, batch_size):
 #### TUNING OPTION ####
 network = "resnet-18"
-log_file = "%s.%s.log" % (device_key, network)
+log_file = f"{device_key}.{network}.log"
 dtype = "float32"
 
 tuning_option = {
@@ -272,7 +272,7 @@ def tune_tasks(
     use_transfer_learning=True,
 ):
     # create tmp log file
-    tmp_log_file = log_filename + ".tmp"
+    tmp_log_file = f"{log_filename}.tmp"
     if os.path.exists(tmp_log_file):
         os.remove(tmp_log_file)
 
@@ -280,7 +280,7 @@ def tune_tasks(
         prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
 
         # create tuner
-        if tuner == "xgb" or tuner == "xgb-rank":
+        if tuner in ["xgb", "xgb-rank"]:
             tuner_obj = XGBTuner(tsk, loss_type="rank")
         elif tuner == "xgb_knob":
             tuner_obj = XGBTuner(tsk, loss_type="rank", feature_type="knob")
@@ -295,11 +295,10 @@ def tune_tasks(
         elif tuner == "gridsearch":
             tuner_obj = GridSearchTuner(tsk)
         else:
-            raise ValueError("Invalid tuner: " + tuner)
+            raise ValueError(f"Invalid tuner: {tuner}")
 
-        if use_transfer_learning:
-            if os.path.isfile(tmp_log_file):
-                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
+        if use_transfer_learning and os.path.isfile(tmp_log_file):
+            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
 
         # process tuning
         tsk_trial = min(n_trial, len(tsk.config_space))
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
index 459b2798c2..ef846aadd8 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_cuda.py
@@ -119,7 +119,7 @@ def get_network(name, batch_size):
         )
         mod = tvm.IRModule.from_expr(net)
     else:
-        raise ValueError("Unsupported network: " + name)
+        raise ValueError(f"Unsupported network: {name}")
 
     return mod, params, input_shape, output_shape
 
@@ -134,7 +134,7 @@ def get_network(name, batch_size):
 #### TUNING OPTION ####
 network = "resnet-18"
-log_file = "%s.log" % network
+log_file = f"{network}.log"
 dtype = "float32"
 
 tuning_option = {
@@ -180,7 +180,7 @@ def tune_tasks(
     use_transfer_learning=True,
 ):
     # create tmp log file
-    tmp_log_file = log_filename + ".tmp"
+    tmp_log_file = f"{log_filename}.tmp"
     if os.path.exists(tmp_log_file):
         os.remove(tmp_log_file)
 
@@ -188,7 +188,7 @@ def tune_tasks(
         prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
 
         # create tuner
-        if tuner == "xgb" or tuner == "xgb-rank":
+        if tuner in ["xgb", "xgb-rank"]:
             tuner_obj = XGBTuner(tsk, loss_type="rank")
         elif tuner == "ga":
             tuner_obj = GATuner(tsk, pop_size=100)
@@ -197,11 +197,10 @@ def tune_tasks(
         elif tuner == "gridsearch":
             tuner_obj = GridSearchTuner(tsk)
         else:
-            raise ValueError("Invalid tuner: " + tuner)
+            raise ValueError(f"Invalid tuner: {tuner}")
 
-        if use_transfer_learning:
-            if os.path.isfile(tmp_log_file):
-                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
+        if use_transfer_learning and os.path.isfile(tmp_log_file):
+            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
 
         # do tuning
         tsk_trial = min(n_trial, len(tsk.config_space))
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
index 5a4f0c56d2..bbabc65c5f 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_mobile_gpu.py
@@ -121,7 +121,7 @@ def get_network(name, batch_size):
         )
         mod = tvm.IRModule.from_expr(net)
     else:
-        raise ValueError("Unsupported network: " + name)
+        raise ValueError(f"Unsupported network: {name}")
 
     return mod, params, input_shape, output_shape
 
@@ -219,7 +219,7 @@ def get_network(name, batch_size):
 #### TUNING OPTION ####
 network = "resnet-18"
-log_file = "%s.%s.log" % (device_key, network)
+log_file = f"{device_key}.{network}.log"
 dtype = "float32"
 
 tuning_option = {
@@ -269,7 +269,7 @@ def tune_tasks(
     use_transfer_learning=True,
 ):
     # create tmp log file
-    tmp_log_file = log_filename + ".tmp"
+    tmp_log_file = f"{log_filename}.tmp"
     if os.path.exists(tmp_log_file):
         os.remove(tmp_log_file)
 
@@ -277,7 +277,7 @@ def tune_tasks(
         prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
 
         # create tuner
-        if tuner == "xgb" or tuner == "xgb-rank":
+        if tuner in ["xgb", "xgb-rank"]:
             tuner_obj = XGBTuner(tsk, loss_type="rank")
         elif tuner == "ga":
             tuner_obj = GATuner(tsk, pop_size=50)
@@ -286,11 +286,10 @@ def tune_tasks(
         elif tuner == "gridsearch":
             tuner_obj = GridSearchTuner(tsk)
         else:
-            raise ValueError("Invalid tuner: " + tuner)
+            raise ValueError(f"Invalid tuner: {tuner}")
 
-        if use_transfer_learning:
-            if os.path.isfile(tmp_log_file):
-                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
+        if use_transfer_learning and os.path.isfile(tmp_log_file):
+            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
 
         # do tuning
         tsk_trial = min(n_trial, len(tsk.config_space))
diff --git a/gallery/how_to/tune_with_autotvm/tune_relay_x86.py b/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
index 6e46fbd8ff..3381012d04 100644
--- a/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
+++ b/gallery/how_to/tune_with_autotvm/tune_relay_x86.py
@@ -91,7 +91,7 @@ def get_network(name, batch_size):
         )
         mod = tvm.IRModule.from_expr(net)
     else:
-        raise ValueError("Unsupported network: " + name)
+        raise ValueError(f"Unsupported network: {name}")
 
     return mod, params, input_shape, output_shape
 
@@ -106,8 +106,8 @@ def get_network(name, batch_size):
 batch_size = 1
 dtype = "float32"
 model_name = "resnet-18"
-log_file = "%s.log" % model_name
-graph_opt_sch_file = "%s_graph_opt.log" % model_name
+log_file = f"{model_name}.log"
+graph_opt_sch_file = f"{model_name}_graph_opt.log"
 
 # Set the input name of the graph
 # For ONNX models, it is typically "0".
@@ -159,7 +159,7 @@ def tune_kernels(
         prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
 
         # create tuner
-        if tuner == "xgb" or tuner == "xgb-rank":
+        if tuner in ["xgb", "xgb-rank"]:
             tuner_obj = XGBTuner(task, loss_type="rank")
         elif tuner == "ga":
             tuner_obj = GATuner(task, pop_size=50)
@@ -168,7 +168,7 @@ def tune_kernels(
         elif tuner == "gridsearch":
             tuner_obj = GridSearchTuner(task)
         else:
-            raise ValueError("Invalid tuner: " + tuner)
+            raise ValueError(f"Invalid tuner: {tuner}")
 
         # do tuning
         n_trial = len(task.config_space)
diff --git a/gallery/how_to/work_with_microtvm/micro_tflite.py b/gallery/how_to/work_with_microtvm/micro_tflite.py
index dfe33eedac..bcb4f1fbbd 100644
--- a/gallery/how_to/work_with_microtvm/micro_tflite.py
+++ b/gallery/how_to/work_with_microtvm/micro_tflite.py
@@ -25,6 +25,7 @@
 model with Relay.
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -161,7 +162,7 @@
 ######################################################################
 # Print out the version of the model
 version = tflite_model.Version()
-print("Model Version: " + str(version))
+print(f"Model Version: {version}")
 
 ######################################################################
 # Parse the python model object to convert it into a relay module
@@ -327,4 +328,4 @@
     graph_mod.run()
 
     tvm_output = graph_mod.get_output(0).numpy()
-    print("result is: " + str(tvm_output))
+    print(f"result is: {tvm_output}")
diff --git a/gallery/how_to/work_with_microtvm/micro_train.py b/gallery/how_to/work_with_microtvm/micro_train.py
index f75c0b05eb..12a918ec0a 100644
--- a/gallery/how_to/work_with_microtvm/micro_train.py
+++ b/gallery/how_to/work_with_microtvm/micro_train.py
@@ -26,6 +26,7 @@
 deployed to Arduino using TVM.
 """
 
+
 ######################################################################
 # .. note::
 #
@@ -208,8 +209,9 @@
     batch_size=32,
     shuffle=True,
     label_mode="categorical",
-    image_size=IMAGE_SIZE[0:2],
+    image_size=IMAGE_SIZE[:2],
 )
+
 rescale = tf.keras.layers.Rescaling(scale=1.0 / 255)
 full_dataset = unscaled_dataset.map(lambda im, lbl: (rescale(im), lbl))
diff --git a/gallery/how_to/work_with_relay/build_gcn.py b/gallery/how_to/work_with_relay/build_gcn.py
index 8953ffc2e4..8a81c39845 100644
--- a/gallery/how_to/work_with_relay/build_gcn.py
+++ b/gallery/how_to/work_with_relay/build_gcn.py
@@ -53,18 +53,15 @@ def __init__(self, g, n_infeat, n_hidden, n_classes, n_layers, activation):
         self.g = g
         self.layers = nn.ModuleList()
         self.layers.append(GraphConv(n_infeat, n_hidden, activation=activation))
-        for i in range(n_layers - 1):
+        for _ in range(n_layers - 1):
             self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
         self.layers.append(GraphConv(n_hidden, n_classes))
 
     def forward(self, features):
         h = features
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
             # handle api changes for differnt DGL version
-            if dgl.__version__ > "0.3":
-                h = layer(self.g, h)
-            else:
-                h = layer(h, self.g)
+            h = layer(self.g, h) if dgl.__version__ > "0.3" else layer(h, self.g)
         return h
 
 
@@ -92,9 +89,7 @@ def evaluate(data, logits):
     test_mask = data.test_mask  # the test set which isn't included in the training phase
 
     pred = logits.argmax(axis=1)
-    acc = ((pred == data.labels) * test_mask).sum() / test_mask.sum()
-
-    return acc
+    return ((pred == data.labels) * test_mask).sum() / test_mask.sum()
 
 
 ######################################################################
@@ -119,6 +114,7 @@ def evaluate(data, logits):
     dimension of model output (Number of classes)
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -146,8 +142,12 @@ def evaluate(data, logits):
 torch_model = GCN(dgl_g, infeat_dim, num_hidden, num_classes, num_layers, F.relu)
 
 # Download the pretrained weights
-model_url = "https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_%s.torch" % (dataset)
-model_path = download_testdata(model_url, "gcn_%s.pickle" % (dataset), module="gcn_model")
+model_url = f"https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_{dataset}.torch"
+
+model_path = download_testdata(
+    model_url, f"gcn_{dataset}.pickle", module="gcn_model"
+)
+
 # Load the weights into the model
 torch_model.load_state_dict(torch.load(model_path))
 
@@ -222,7 +222,7 @@ def GraphConv(layer_name, input_dim, output_dim, adj, input, norm=None, bias=Tru
     if norm is not None:
         input = relay.multiply(input, norm)
 
-    weight = relay.var(layer_name + ".weight", shape=(input_dim, output_dim))
+    weight = relay.var(f"{layer_name}.weight", shape=(input_dim, output_dim))
     weight_t = relay.transpose(weight)
     dense = relay.nn.dense(weight_t, input)
     output = relay.nn.sparse_dense(dense, adj)
@@ -230,7 +230,7 @@ def GraphConv(layer_name, input_dim, output_dim, adj, input, norm=None, bias=Tru
     if norm is not None:
         output_t = relay.multiply(output_t, norm)
     if bias is True:
-        _bias = relay.var(layer_name + ".bias", shape=(output_dim, 1))
+        _bias = relay.var(f"{layer_name}.bias", shape=(output_dim, 1))
         output_t = relay.nn.bias_add(output_t, _bias, axis=-1)
     if activation is not None:
         output_t = activation(output_t)
@@ -246,11 +246,7 @@ def GraphConv(layer_name, input_dim, output_dim, adj, input, norm=None, bias=Tru
 
 def prepare_params(g, data):
-    params = {}
-    params["infeats"] = data.features.numpy().astype(
-        "float32"
-    )  # Only support float32 as feature for now
-
+    params = {"infeats": data.features.numpy().astype("float32")}  # Only support float32 as feature for now
     # Generate adjacency matrix
     adjacency = nx.to_scipy_sparse_matrix(g)
     params["g_data"] = adjacency.data.astype("float32")
@@ -289,8 +285,7 @@ def prepare_params(g, data):
 adj = Adjacency(g_data, indices, indptr)
 
 # Construct the 2-layer GCN
-layers = []
-layers.append(
+layers = [
     GraphConv(
         layer_name="layers.0",
         input_dim=infeat_dim,
@@ -300,7 +295,8 @@ def prepare_params(g, data):
         norm=norm,
         activation=relay.nn.relu,
     )
-)
+]
+
 layers.append(
     GraphConv(
         layer_name="layers.1",
@@ -321,9 +317,10 @@ def prepare_params(g, data):
 # ------------------------
 #
 # Export the weights from PyTorch model to Python Dict
-model_params = {}
-for param_tensor in torch_model.state_dict():
-    model_params[param_tensor] = torch_model.state_dict()[param_tensor].numpy()
+model_params = {
+    param_tensor: torch_model.state_dict()[param_tensor].numpy()
+    for param_tensor in torch_model.state_dict()
+}
 
 for i in range(num_layers + 1):
     params["layers.%d.weight" % (i)] = model_params["layers.%d.weight" % (i)]
diff --git a/gallery/how_to/work_with_relay/using_pipeline_executor.py b/gallery/how_to/work_with_relay/using_pipeline_executor.py
index 87516d656d..9088d60bce 100755
--- a/gallery/how_to/work_with_relay/using_pipeline_executor.py
+++ b/gallery/how_to/work_with_relay/using_pipeline_executor.py
@@ -135,10 +135,13 @@ def @main(%data_n_0: Tensor[(1, 16, 8, 8), float16] /* ty=Tensor[(1, 16, 8, 8),
 def cutlass_build(mod, target, params=None, target_host=None, mod_name="default"):
     target = [target, cutlass]
-    lib = relay.build_module.build(
-        mod, target=target, params=params, target_host=target_host, mod_name=mod_name
+    return relay.build_module.build(
+        mod,
+        target=target,
+        params=params,
+        target_host=target_host,
+        mod_name=mod_name,
     )
-    return lib
 
 
 ###########################################################
diff --git a/gallery/how_to/work_with_schedules/extern_op.py b/gallery/how_to/work_with_schedules/extern_op.py
index ad741a08d5..c466262b8b 100644
--- a/gallery/how_to/work_with_schedules/extern_op.py
+++ b/gallery/how_to/work_with_schedules/extern_op.py
@@ -118,7 +118,7 @@
 #
 @tvm.register_func("tvm.contrib.my_tvm_addone")
 def my_tvm_addone(x, y):
-    print("my_tvm_addone signatures: %s, %s" % (type(x), type(y)))
+    print(f"my_tvm_addone signatures: {type(x)}, {type(y)}")
     tvm.nd.array(x.numpy() + 1).copyto(y)
diff --git a/gallery/how_to/work_with_schedules/intrin_math.py b/gallery/how_to/work_with_schedules/intrin_math.py
index 5a8732abd7..a527038d08 100644
--- a/gallery/how_to/work_with_schedules/intrin_math.py
+++ b/gallery/how_to/work_with_schedules/intrin_math.py
@@ -110,7 +110,7 @@ def my_cuda_math_rule(op):
     dispatch_name = name[4:]
     if op.dtype == "float32":
         # call float function
-        return tvm.tir.call_pure_extern("float32", "%sf" % dispatch_name, op.args[0])
+        return tvm.tir.call_pure_extern("float32", f"{dispatch_name}f", op.args[0])
     elif op.dtype == "float64":
         # call double function
         return tvm.tir.call_pure_extern("float32", dispatch_name, op.args[0])
diff --git a/gallery/how_to/work_with_schedules/tensorize.py b/gallery/how_to/work_with_schedules/tensorize.py
index 45eaf349f3..80cfddcca7 100644
--- a/gallery/how_to/work_with_schedules/tensorize.py
+++ b/gallery/how_to/work_with_schedules/tensorize.py
@@ -171,9 +171,7 @@ def gemv_impl():
     temp = utils.tempdir()
     ll_path = temp.relpath("temp.ll")
-    # Create LLVM ir from c source code
-    ll_code = clang.create_llvm(cc_code, output=ll_path)
-    return ll_code
+    return clang.create_llvm(cc_code, output=ll_path)
 
 
 ######################################################################
@@ -239,9 +237,7 @@ def gemv_impl():
     temp = utils.tempdir()
     ll_path = temp.relpath("temp.ll")
-    # Create LLVM ir from c source code
-    ll_code = clang.create_llvm(cc_code, output=ll_path)
-    return ll_code
+    return clang.create_llvm(cc_code, output=ll_path)
 
 
 def intrin_gemv(m, l):
diff --git a/gallery/tutorial/autotvm_relay_x86.py b/gallery/tutorial/autotvm_relay_x86.py
index b7dfbe28f4..6bc0b72944 100644
--- a/gallery/tutorial/autotvm_relay_x86.py
+++ b/gallery/tutorial/autotvm_relay_x86.py
@@ -42,6 +42,7 @@
 how to use them through the Python API.
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -175,7 +176,7 @@
 with tvm.transform.PassContext(opt_level=3):
     lib = relay.build(mod, target=target, params=params)
 
-dev = tvm.device(str(target), 0)
+dev = tvm.device(target, 0)
 module = graph_executor.GraphModule(lib["default"](dev))
 
 ######################################################################
@@ -243,7 +244,7 @@
 scores = softmax(tvm_output)
 scores = np.squeeze(scores)
 ranks = np.argsort(scores)[::-1]
-for rank in ranks[0:5]:
+for rank in ranks[:5]:
     print("class='%s' with probability=%f" % (labels[rank], scores[rank]))
 
 ################################################################################
@@ -417,7 +418,7 @@
 with tvm.transform.PassContext(opt_level=3, config={}):
     lib = relay.build(mod, target=target, params=params)
 
-dev = tvm.device(str(target), 0)
+dev = tvm.device(target, 0)
 module = graph_executor.GraphModule(lib["default"](dev))
 
 ################################################################################
@@ -432,7 +433,7 @@
 scores = softmax(tvm_output)
 scores = np.squeeze(scores)
 ranks = np.argsort(scores)[::-1]
-for rank in ranks[0:5]:
+for rank in ranks[:5]:
     print("class='%s' with probability=%f" % (labels[rank], scores[rank]))
 
 ################################################################################
@@ -466,8 +467,8 @@
 optimized = {"mean": np.mean(optimized), "median": np.median(optimized), "std": np.std(optimized)}
 
-print("optimized: %s" % (optimized))
-print("unoptimized: %s" % (unoptimized))
+print(f"optimized: {optimized}")
+print(f"unoptimized: {unoptimized}")
 
 ################################################################################
 # Final Remarks
diff --git a/gallery/tutorial/cross_compilation_and_rpc.py b/gallery/tutorial/cross_compilation_and_rpc.py
index 3f74899f7b..c12219da22 100644
--- a/gallery/tutorial/cross_compilation_and_rpc.py
+++ b/gallery/tutorial/cross_compilation_and_rpc.py
@@ -31,6 +31,7 @@
 and the Firefly-RK3399 for an OpenCL example.
 """
 
+
 # sphinx_gallery_start_ignore
 from tvm import testing
 
@@ -119,11 +120,7 @@
 
 local_demo = True
 
-if local_demo:
-    target = "llvm"
-else:
-    target = "llvm -mtriple=armv7l-linux-gnueabihf"
-
+target = "llvm" if local_demo else "llvm -mtriple=armv7l-linux-gnueabihf"
 func = tvm.build(s, [A, B], target=target, name="add_one")
 # save the lib at a local temp folder
 temp = utils.tempdir()
diff --git a/gallery/tutorial/relay_quick_start.py b/gallery/tutorial/relay_quick_start.py
index 8910817c21..9ef128078b 100644
--- a/gallery/tutorial/relay_quick_start.py
+++ b/gallery/tutorial/relay_quick_start.py
@@ -26,6 +26,7 @@
 Notice that you need to build TVM with cuda and llvm enabled.
""" + # sphinx_gallery_start_ignore from tvm import testing @@ -126,7 +127,7 @@ out = module.get_output(0, tvm.nd.empty(out_shape)).numpy() # Print first 10 elements of output -print(out.flatten()[0:10]) +print(out.flatten()[:10]) ###################################################################### # Save and Load Compiled Module @@ -155,7 +156,7 @@ out_deploy = module.get_output(0).numpy() # Print first 10 elements of output -print(out_deploy.flatten()[0:10]) +print(out_deploy.flatten()[:10]) # check whether the output from deployed module is consistent with original one tvm.testing.assert_allclose(out_deploy, out, atol=1e-5) diff --git a/gallery/tutorial/tensor_ir_blitz_course.py b/gallery/tutorial/tensor_ir_blitz_course.py index a62fa39793..001886bac2 100644 --- a/gallery/tutorial/tensor_ir_blitz_course.py +++ b/gallery/tutorial/tensor_ir_blitz_course.py @@ -82,11 +82,11 @@ @tvm.script.ir_module class MyModule: @T.prim_func - def main(a: T.handle, b: T.handle): + def main(self, b: T.handle): # We exchange data between function by handles, which are similar to pointer. T.func_attr({"global_symbol": "main", "tir.noalias": True}) # Create buffer from handles. - A = T.match_buffer(a, (8,), dtype="float32") + A = T.match_buffer(self, (8,), dtype="float32") B = T.match_buffer(b, (8,), dtype="float32") for i in range(8): # A block is an abstraction for computation. diff --git a/golang/sample/gen_mobilenet_lib.py b/golang/sample/gen_mobilenet_lib.py index 12f215b4fd..eb064338c4 100644 --- a/golang/sample/gen_mobilenet_lib.py +++ b/golang/sample/gen_mobilenet_lib.py @@ -32,7 +32,7 @@ def extract(path): tar.extractall(path=dir_path) tar.close() else: - raise RuntimeError("Could not decompress the file: " + path) + raise RuntimeError(f"Could not decompress the file: {path}") ################################### diff --git a/jvm/core/src/test/scripts/test_add_gpu.py b/jvm/core/src/test/scripts/test_add_gpu.py index 21fd9edc06..cf1e70b81a 100644 --- a/jvm/core/src/test/scripts/test_add_gpu.py +++ b/jvm/core/src/test/scripts/test_add_gpu.py @@ -23,13 +23,12 @@ @tvm.register_func("tvm_callback_cuda_compile", override=True) def tvm_callback_cuda_compile(code): - ptx = nvcc.compile_cuda(code, target_format="ptx") - return ptx + return nvcc.compile_cuda(code, target_format="ptx") def test_add(target_dir): if not tvm.runtime.enabled("cuda"): - print("skip %s because cuda is not enabled..." 
% __file__) + print(f"skip {__file__} because cuda is not enabled...") return n = te.var("n") A = te.placeholder((n,), name="A") diff --git a/python/gen_requirements.py b/python/gen_requirements.py index 7e2c3e2186..628931f722 100755 --- a/python/gen_requirements.py +++ b/python/gen_requirements.py @@ -433,8 +433,7 @@ def parse_semver( # Major/minor version handling is simple for i, p in enumerate(min_ver_parts[:2]): - x = int(p.strip()) - if x: + if x := int(p.strip()): return min_ver_parts, i, x # For patch version, consult only the numeric patch @@ -542,12 +541,10 @@ def __init__(self, config: str, problems: typing.List[str]): def validate_or_raise(): - problems = validate_requirements_by_piece() - if problems: + if problems := validate_requirements_by_piece(): raise ValidationError("REQUIREMENTS_BY_PIECE", problems) - problems = validate_constraints() - if problems: + if problems := validate_constraints(): raise ValidationError("CONSTRAINTS", problems) @@ -624,7 +621,7 @@ def join_and_write_requirements(args: argparse.Namespace): joined_deps = join_requirements() except ValidationError as e: print(f"ERROR: invalid requirements configuration in {__file__}:", file=sys.stderr) - print(str(e), file=sys.stderr) + print(e, file=sys.stderr) sys.exit(2) if args.lint: diff --git a/python/setup.py b/python/setup.py index a75888c836..6ad82b927b 100644 --- a/python/setup.py +++ b/python/setup.py @@ -90,7 +90,7 @@ def git_describe_version(original_version): exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver) _, gd_version = libver["git_describe_version"]() if gd_version != original_version and "--inplace" not in sys.argv: - print("Use git describe based version %s" % gd_version) + print(f"Use git describe based version {gd_version}") return gd_version @@ -112,10 +112,7 @@ def config_cython(): from Cython.Build import cythonize # from setuptools.extension import Extension - if sys.version_info >= (3, 0): - subdir = "_cy3" - else: - subdir = "_cy2" + subdir = "_cy3" if sys.version_info >= (3, 0) else "_cy2" ret = [] path = "tvm/_ffi/_cython" extra_compile_args = ["-std=c++17", "-DDMLC_USE_LOGGING_LIBRARY="] @@ -130,24 +127,24 @@ def config_cython(): library_dirs = None libraries = None - for fn in os.listdir(path): - if not fn.endswith(".pyx"): - continue - ret.append( - Extension( - "tvm._ffi.%s.%s" % (subdir, fn[:-4]), - ["tvm/_ffi/_cython/%s" % fn], - include_dirs=[ - "../include/", - "../3rdparty/dmlc-core/include", - "../3rdparty/dlpack/include", - ], - extra_compile_args=extra_compile_args, - library_dirs=library_dirs, - libraries=libraries, - language="c++", - ) + ret.extend( + Extension( + f"tvm._ffi.{subdir}.{fn[:-4]}", + [f"tvm/_ffi/_cython/{fn}"], + include_dirs=[ + "../include/", + "../3rdparty/dmlc-core/include", + "../3rdparty/dlpack/include", + ], + extra_compile_args=extra_compile_args, + library_dirs=library_dirs, + libraries=libraries, + language="c++", ) + for fn in os.listdir(path) + if fn.endswith(".pyx") + ) + return cythonize(ret, compiler_directives={"language_level": 3}) except ImportError as error: if FFI_MODE == "cython": diff --git a/python/tvm/__init__.py b/python/tvm/__init__.py index 5b6fbe7b25..8b8393bca7 100644 --- a/python/tvm/__init__.py +++ b/python/tvm/__init__.py @@ -91,9 +91,10 @@ def _should_print_backtrace(): tvm_backtrace = bool(int(tvm_backtrace)) except ValueError: raise ValueError( - "invalid value for TVM_BACKTRACE {}, please set to 0 or 1.".format(tvm_backtrace) + f"invalid value for TVM_BACKTRACE {tvm_backtrace}, please set to 0 or 
1." ) + return in_pytest or tvm_backtrace diff --git a/python/tvm/_ffi/_ctypes/packed_func.py b/python/tvm/_ffi/_ctypes/packed_func.py index bf763a1943..fe5b12f6ae 100644 --- a/python/tvm/_ffi/_ctypes/packed_func.py +++ b/python/tvm/_ffi/_ctypes/packed_func.py @@ -124,8 +124,11 @@ def _make_tvm_args(args, temp_args): elif isinstance(arg, NDArrayBase): values[i].v_handle = ctypes.cast(arg.handle, ctypes.c_void_p) type_codes[i] = ( - ArgTypeCode.NDARRAY_HANDLE if not arg.is_view else ArgTypeCode.DLTENSOR_HANDLE + ArgTypeCode.DLTENSOR_HANDLE + if arg.is_view + else ArgTypeCode.NDARRAY_HANDLE ) + elif isinstance(arg, PyNativeObject): values[i].v_handle = arg.__tvm_object__.handle type_codes[i] = ArgTypeCode.OBJECT_HANDLE @@ -209,9 +212,12 @@ def __init__(self, handle, is_global): self.is_global = is_global def __del__(self): - if not self.is_global and _LIB is not None: - if _LIB.TVMFuncFree(self.handle) != 0: - raise get_last_ffi_error() + if ( + not self.is_global + and _LIB is not None + and _LIB.TVMFuncFree(self.handle) != 0 + ): + raise get_last_ffi_error() def __call__(self, *args): """Call the function with positional arguments @@ -291,7 +297,7 @@ def _get_global_func(name, allow_missing=False): if allow_missing: return None - raise ValueError("Cannot find global function %s" % name) + raise ValueError(f"Cannot find global function {name}") # setup return handle for function type diff --git a/python/tvm/_ffi/_pyversion.py b/python/tvm/_ffi/_pyversion.py index b661cfd875..746e891110 100644 --- a/python/tvm/_ffi/_pyversion.py +++ b/python/tvm/_ffi/_pyversion.py @@ -16,11 +16,12 @@ # under the License. """Python version check """ + import sys # ---------------------------- # Python3 version. # ---------------------------- -if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6): +if sys.version_info[0] < 3 or sys.version_info[1] < 6: PY3STATEMENT = "The minimal Python requirement is Python 3.6" raise Exception(PY3STATEMENT) diff --git a/python/tvm/_ffi/base.py b/python/tvm/_ffi/base.py index 744e4c93e1..af1c709e69 100644 --- a/python/tvm/_ffi/base.py +++ b/python/tvm/_ffi/base.py @@ -38,7 +38,7 @@ def _py_str(x): try: return x.decode("utf-8") except UnicodeDecodeError: - encoding = "cp" + str(ctypes.cdll.kernel32.GetACP()) + encoding = f"cp{str(ctypes.cdll.kernel32.GetACP())}" return x.decode(encoding) py_str = _py_str @@ -181,9 +181,7 @@ def register(mycls): ERROR_TYPE[err_name] = mycls return mycls - if cls is None: - return register - return register(cls) + return register if cls is None else register(cls) def _valid_error_name(name): @@ -217,17 +215,12 @@ def _find_error_type(line): err_name = line[:end_pos].strip() else: err_name = line[start_pos + 1 : end_pos].strip() - if _valid_error_name(err_name): - return err_name - return None - + return err_name if _valid_error_name(err_name) else None end_pos = line.find(":") if end_pos == -1: return None err_name = line[:end_pos] - if _valid_error_name(err_name): - return err_name - return None + return err_name if _valid_error_name(err_name) else None def c2pyerror(err_msg): @@ -255,7 +248,7 @@ def c2pyerror(err_msg): message = [] for line in arr: if trace_mode: - if line.startswith(" ") and len(stack_trace) > 0: + if line.startswith(" ") and stack_trace: stack_trace[-1] += "\n" + line elif line.startswith(" "): stack_trace.append(line) diff --git a/python/tvm/_ffi/libinfo.py b/python/tvm/_ffi/libinfo.py index 44ce10bd5a..f0f5704b93 100644 --- a/python/tvm/_ffi/libinfo.py +++ b/python/tvm/_ffi/libinfo.py @@ -64,19 +64,23 @@ def 
get_dll_directories(): elif sys.platform.startswith("win32"): dll_path.extend(split_env_var("PATH", ";")) - # Pip lib directory - dll_path.append(os.path.join(ffi_dir, "..")) - # Default cmake build directory - dll_path.append(os.path.join(source_dir, "build")) - dll_path.append(os.path.join(source_dir, "build", "Release")) - # Default make build directory - dll_path.append(os.path.join(source_dir, "lib")) - - dll_path.append(install_lib_dir) + dll_path.extend( + ( + os.path.join(ffi_dir, ".."), + os.path.join(source_dir, "build"), + os.path.join(source_dir, "build", "Release"), + os.path.join(source_dir, "lib"), + install_lib_dir, + ) + ) if os.path.isdir(source_dir): - dll_path.append(os.path.join(source_dir, "web", "dist", "wasm")) - dll_path.append(os.path.join(source_dir, "web", "dist")) + dll_path.extend( + ( + os.path.join(source_dir, "web", "dist", "wasm"), + os.path.join(source_dir, "web", "dist"), + ) + ) dll_path = [os.path.realpath(x) for x in dll_path] return [x for x in dll_path if os.path.isdir(x)] @@ -175,13 +179,11 @@ def find_include_path(name=None, search_path=None, optional=False): if os.environ.get("TVM_INCLUDE_PATH", None): header_path.append(os.environ["TVM_INCLUDE_PATH"]) - header_path.append(source_dir) - header_path.append(third_party_dir) - + header_path.extend((source_dir, third_party_dir)) header_path = [os.path.abspath(x) for x in header_path] if search_path is not None: if isinstance(search_path, list): - header_path = header_path + search_path + header_path += search_path else: header_path.append(search_path) if name is not None: @@ -207,8 +209,9 @@ def find_include_path(name=None, search_path=None, optional=False): message = ( "Cannot find the files.\n" + "List of candidates:\n" - + str("\n".join(tvm_include_path + dlpack_include_path)) + + "\n".join(tvm_include_path + dlpack_include_path) ) + if not optional: raise RuntimeError(message) return None diff --git a/python/tvm/_ffi/registry.py b/python/tvm/_ffi/registry.py index 1b6b1dec9a..191789d020 100644 --- a/python/tvm/_ffi/registry.py +++ b/python/tvm/_ffi/registry.py @@ -76,10 +76,7 @@ def register(cls): _register_object(tindex, cls) return cls - if isinstance(type_key, str): - return register - - return register(type_key) + return register if isinstance(type_key, str) else register(type_key) def get_object_type_index(cls): @@ -210,9 +207,7 @@ def register(myf): check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride)) return myf - if f: - return register(f) - return register + return register(f) if f else register def get_global_func(name, allow_missing=False): @@ -246,10 +241,7 @@ def list_global_func_names(): size = ctypes.c_uint() check_call(_LIB.TVMFuncListGlobalNames(ctypes.byref(size), ctypes.byref(plist))) - fnames = [] - for i in range(size.value): - fnames.append(py_str(plist[i])) - return fnames + return [py_str(plist[i]) for i in range(size.value)] def extract_ext_funcs(finit): @@ -275,7 +267,7 @@ def _list(name, func): ret = finit(myf.handle) _ = myf if ret != 0: - raise RuntimeError("cannot initialize with %s" % finit) + raise RuntimeError(f"cannot initialize with {finit}") return fdict @@ -305,7 +297,7 @@ def _init_api(namespace, target_module_name=None): target_module_name : str The target module name if different from namespace """ - target_module_name = target_module_name if target_module_name else namespace + target_module_name = target_module_name or namespace if namespace.startswith("tvm."): _init_api_prefix(target_module_name, namespace[4:]) else: @@ -327,5 
+319,5 @@ def _init_api_prefix(module_name, prefix): f = get_global_func(name) ff = _get_api(f) ff.__name__ = fname - ff.__doc__ = "TVM PackedFunc %s. " % fname + ff.__doc__ = f"TVM PackedFunc {fname}. " setattr(target_module, ff.__name__, ff) diff --git a/python/tvm/_ffi/runtime_ctypes.py b/python/tvm/_ffi/runtime_ctypes.py index 0b14c80bdb..465177b0da 100644 --- a/python/tvm/_ffi/runtime_ctypes.py +++ b/python/tvm/_ffi/runtime_ctypes.py @@ -147,12 +147,12 @@ def __init__(self, type_str): low, high = head.find("["), head.find("]") if not low or not high or low >= high: - raise ValueError("Badly formatted custom type string %s" % type_str) + raise ValueError(f"Badly formatted custom type string {type_str}") type_name = head[low + 1 : high] self.type_code = tvm.runtime._ffi_api._datatype_get_type_code(type_name) head = head[high + 1 :] else: - raise ValueError("Do not know how to handle type %s" % type_str) + raise ValueError(f"Do not know how to handle type {type_str}") bits = int(head) if head else bits self.bits = bits @@ -165,7 +165,8 @@ def __repr__(self): else: import tvm.runtime._ffi_api - type_name = "custom[%s]" % tvm.runtime._ffi_api._datatype_get_type_name(self.type_code) + type_name = f"custom[{tvm.runtime._ffi_api._datatype_get_type_name(self.type_code)}]" + x = "%s%d" % (type_name, self.bits) if self.lanes != 1: x += "x%d" % self.lanes diff --git a/python/tvm/arith/analyzer.py b/python/tvm/arith/analyzer.py index 28adbe9d81..f3d217d021 100644 --- a/python/tvm/arith/analyzer.py +++ b/python/tvm/arith/analyzer.py @@ -251,7 +251,7 @@ def update(self, var, info, override=False): if isinstance(info, ConstIntBound): self._const_int_bound_update(var, info, override) else: - raise TypeError("Do not know how to handle type {}".format(type(info))) + raise TypeError(f"Do not know how to handle type {type(info)}") def can_prove_equal(self, lhs: "PrimExpr", rhs: "PrimExpr"): """Whether we can prove that lhs == rhs diff --git a/python/tvm/auto_scheduler/compute_dag.py b/python/tvm/auto_scheduler/compute_dag.py index c212d143f9..69efdfab2d 100755 --- a/python/tvm/auto_scheduler/compute_dag.py +++ b/python/tvm/auto_scheduler/compute_dag.py @@ -66,17 +66,17 @@ def get_target_default(target, in_relay_integration=False): layout_rewrite_option: LayoutRewriteOption The default layout rewrite option for the specified target. """ - layout_rewrite_option = LayoutRewriteOption.NO_REWRITE if target.kind.name == "llvm" or ( "device" in target.attrs and target.attrs["device"] == "mali" ): - layout_rewrite_option = ( + return ( LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED if in_relay_integration else LayoutRewriteOption.INSERT_TRANSFORM_STAGE ) - return layout_rewrite_option + else: + return LayoutRewriteOption.NO_REWRITE @tvm._ffi.register_object("auto_scheduler.ComputeDAG") @@ -108,9 +108,9 @@ def __init__(self, compute_or_sche): for item in compute_or_sche: if not isinstance(item, tvm.te.Tensor): raise ValueError( - "The input of ComputeDAG should be a list of Tensor, but got %s" - % type(item) + f"The input of ComputeDAG should be a list of Tensor, but got {type(item)}" ) + compute = compute_or_sche sche = None elif isinstance(compute_or_sche, tvm.te.Schedule): @@ -118,9 +118,9 @@ def __init__(self, compute_or_sche): sche = compute_or_sche else: raise ValueError( - "Invalid compute type: %s. ComputeDAG expects string, list of Tensor, or Schedule" - % type(compute_or_sche) + f"Invalid compute type: {type(compute_or_sche)}. 
ComputeDAG expects string, list of Tensor, or Schedule" ) + self.__init_handle_by_constructor__(_ffi_api.ComputeDAG, compute, sche) def get_init_state(self): @@ -240,9 +240,7 @@ def workload_key(self): else: hash_key = hash_func(str_dag) - io_shapes = [] - for tensor in self.tensors: - io_shapes.append(get_const_tuple(tensor.shape)) + io_shapes = [get_const_tuple(tensor.shape) for tensor in self.tensors] return json.dumps([hash_key] + io_shapes) def __str__(self): @@ -253,9 +251,8 @@ def __str__(self): lines = [] for line in raw_lines: if len(line) > MAX_LINE_WIDTH: - line = ( - line[: MAX_LINE_WIDTH // 2] + " ..(OMITTED).. " + line[-MAX_LINE_WIDTH // 2 :] - ) + line = f"{line[:MAX_LINE_WIDTH // 2]} ..(OMITTED).. {line[-MAX_LINE_WIDTH // 2:]}" + lines.append(line) return "\n".join(lines) diff --git a/python/tvm/auto_scheduler/cost_model/xgb_model.py b/python/tvm/auto_scheduler/cost_model/xgb_model.py index a4e39b9061..ab97aaef0a 100644 --- a/python/tvm/auto_scheduler/cost_model/xgb_model.py +++ b/python/tvm/auto_scheduler/cost_model/xgb_model.py @@ -288,7 +288,7 @@ def predict_stages(self, task, states): stage_scores = [[] for _ in range(len(states))] for pred, pack_id in zip(raw_preds, pack_ids): stage_scores[pack_id].append(pred) - for idx, stage_score in enumerate(stage_scores): + for stage_score in stage_scores: breakdown = np.append(breakdown, len(stage_score)) breakdown = np.concatenate((breakdown, np.array(stage_score))) else: @@ -436,8 +436,7 @@ def predict_throughput_pack_sum(raw_preds, pack_ids): throughputs: np.ndarray The throughput """ - sum_pred = np.bincount(pack_ids, weights=raw_preds) - return sum_pred + return np.bincount(pack_ids, weights=raw_preds) def pack_sum_square_error(preds, dtrain): @@ -568,11 +567,7 @@ def init(env): state["maximize_score"] = maximize state["best_iteration"] = 0 - if maximize: - state["best_score"] = float("-inf") - else: - state["best_score"] = float("inf") - + state["best_score"] = float("-inf") if maximize else float("inf") if bst is not None: if bst.attr("best_score") is not None: state["best_score"] = float(bst.attr("best_score")) @@ -613,7 +608,7 @@ def callback(env): eval_res = [] keys = list(res_dict.keys()) - keys.sort(key=lambda x: x if metric_shortname not in x else "a" + x) + keys.sort(key=lambda x: x if metric_shortname not in x else f"a{x}") for key in keys: v = res_dict[key] eval_res.append([key] + v) diff --git a/python/tvm/auto_scheduler/dispatcher.py b/python/tvm/auto_scheduler/dispatcher.py index 98566f8636..4937e13119 100644 --- a/python/tvm/auto_scheduler/dispatcher.py +++ b/python/tvm/auto_scheduler/dispatcher.py @@ -210,9 +210,8 @@ def load(self, records, n_lines=None): if isinstance(rec, str): rec = load_records(rec) joint_records += rec - else: - if rec is not None: - joint_records.append(rec) + elif rec is not None: + joint_records.append(rec) if not joint_records: return @@ -247,14 +246,13 @@ def load(self, records, n_lines=None): entry, _, workload_args = self.get_workload_entry( best_by_model, inp.task.target.model, inp.task.workload_key ) - if workload_args not in entry: - if inp.task.target.model != "unknown": - entry[workload_args] = (inp.state, cost) - else: + if workload_args in entry: _, other_cost = entry[workload_args] if other_cost > cost: entry[workload_args] = (inp.state, cost) + elif inp.task.target.model != "unknown": + entry[workload_args] = (inp.state, cost) logger.debug("Finish loading %d records", counter) def _query_inside(self, target, workload_key, func_name): @@ -380,7 +378,7 @@ def 
_query_inside(self, target, workload_key, func_name): task = SearchTask(workload_key=workload_key, target=target) measure_ctx = LocalRPCMeasureContext(min_repeat_ms=300) - log_file = self.log_dir.relpath("%s.log" % decode_workload_key(workload_key)[0]) + log_file = self.log_dir.relpath(f"{decode_workload_key(workload_key)[0]}.log") while ret is None: tune_option = TuningOptions( diff --git a/python/tvm/auto_scheduler/feature.py b/python/tvm/auto_scheduler/feature.py index 09d54a92fd..a04bf1ccc1 100644 --- a/python/tvm/auto_scheduler/feature.py +++ b/python/tvm/auto_scheduler/feature.py @@ -100,8 +100,6 @@ def unpack_feature(byte_arr: bytearray) -> Tuple[np.ndarray, np.ndarray, np.ndar # unpack features features = [] for size in sizes[:-2]: - row = [] - # Now, we need to unpack the feature for multiple statements. # The format is: # { @@ -126,6 +124,8 @@ def unpack_feature(byte_arr: bytearray) -> Tuple[np.ndarray, np.ndarray, np.ndar tmp_vec_len, ) assert tmp_vec_len * n_stmts == size - 1 + row = [] + for _ in range(n_stmts): x = struct.unpack_from("%df" % vec_len, byte_arr, offset=offset) offset += vec_len * SIZE_OF_FLOAT32 diff --git a/python/tvm/auto_scheduler/loop_state.py b/python/tvm/auto_scheduler/loop_state.py index 03cc00def6..68776b39f6 100644 --- a/python/tvm/auto_scheduler/loop_state.py +++ b/python/tvm/auto_scheduler/loop_state.py @@ -159,7 +159,7 @@ def bind(self, stage, iterator, thread_name): res_it : Iterator The binded Iterator. """ - if not thread_name in State.ANNOTATION_TRANS_TABLE.keys(): + if thread_name not in State.ANNOTATION_TRANS_TABLE.keys(): raise ValueError("Invalid thread_name: ", thread_name) self.state_object, res = _ffi_api.StateBind( @@ -211,8 +211,9 @@ def unroll(self, stage, iterator, max_unroll=None): self.state_object, self._resolve_stage_id(stage), iterator, - max_unroll if max_unroll else -1, + max_unroll or -1, ) + return res def vectorize(self, stage, iterator): @@ -592,7 +593,7 @@ def _resolve_stage_id(self, stage_id): if isinstance(stage_id, int): return stage_id raise ValueError( - "Invalid stage: " + stage_id + " . Expect to be a int, Operation or Tensor" + f"Invalid stage: {stage_id}. Expected an int, Operation or Tensor" ) def _update_stage_id_map(self): @@ -609,7 +610,7 @@ def __getitem__(self, key): key = key.op if isinstance(key, Operation): return self.stages[self.stage_id_map[key]] - raise ValueError("Invalid item: " + key + " . Expect to be a Operation or Tensor") + raise ValueError(f"Invalid item: {key}. 
Expected an Operation or Tensor") def __str__(self): return str(self.state_object) diff --git a/python/tvm/auto_scheduler/measure.py b/python/tvm/auto_scheduler/measure.py index 6f331499b0..11754982a0 100644 --- a/python/tvm/auto_scheduler/measure.py +++ b/python/tvm/auto_scheduler/measure.py @@ -167,8 +167,8 @@ class BuildResult(Object): """ def __init__(self, filename, args, error_no, error_msg, time_cost): - filename = filename if filename else "" - error_msg = error_msg if error_msg else "" + filename = filename or "" + error_msg = error_msg or "" self.__init_handle_by_constructor__( _ffi_api.BuildResult, filename, args, error_no, error_msg, time_cost @@ -194,7 +194,7 @@ class MeasureResult(Object): """ def __init__(self, costs, error_no, error_msg, all_cost, timestamp): - error_msg = error_msg if error_msg else "" + error_msg = error_msg or "" self.__init_handle_by_constructor__( _ffi_api.MeasureResult, costs, error_no, error_msg, all_cost, timestamp @@ -339,7 +339,7 @@ def __init__(self, timeout=15, n_parallel=multiprocessing.cpu_count(), build_fun BuildFunc.name = "custom" BuildFunc.build_func = build_func else: - raise ValueError("Invalid build_func" + build_func) + raise ValueError(f"Invalid build_func: {build_func}") self.__init_handle_by_constructor__( _ffi_api.LocalBuilder, timeout, n_parallel, BuildFunc.name @@ -626,7 +626,7 @@ def _local_build_worker(inp_serialized, build_func, verbose): if error_no == 0: dirname = tempfile.mkdtemp() - filename = os.path.join(dirname, "tmp_func." + build_func.output_format) + filename = os.path.join(dirname, f"tmp_func.{build_func.output_format}") try: with transform.PassContext().current(): @@ -691,9 +691,10 @@ def local_builder_build(inputs, timeout, n_parallel, build_func="default", verbo res : List[BuildResult] The build results of these MeasureInputs. 
""" - assert build_func == BuildFunc.name, ( - "BuildFunc.name: " + BuildFunc.name + ", but args is: " + build_func - ) + assert ( + build_func == BuildFunc.name + ), f"BuildFunc.name: {BuildFunc.name}, but args is: {build_func}" + executor = PopenPoolExecutor( n_parallel, timeout, reset_global_scope, (AutotvmGlobalScope.current,) ) @@ -771,13 +772,11 @@ def check_task_input_by_placeholder_name(args : List[Tensor]): def register(myf): """internal register function""" if func_name in TASK_INPUT_CHECK_FUNC_REGISTRY and not override: - raise RuntimeError("%s has been registered already" % func_name) + raise RuntimeError(f"{func_name} has been registered already") TASK_INPUT_CHECK_FUNC_REGISTRY[func_name] = myf return myf - if f: - return register(f) - return register + return register(f) if f else register def prepare_input_map(args): @@ -804,18 +803,18 @@ def prepare_input_map(args): global TASK_INPUT_CHECK_FUNC_REGISTRY # A dict that maps the input tensor arg to a buffer name - tensor_input_map = {} + tensor_input_map = { + arg: arg.op.name + for arg in args + if isinstance(arg.op, tvm.te.PlaceholderOp) + and arg.op.name != "placeholder" + } - # Case 0: Check placeholder name - for arg in args: - if isinstance(arg.op, tvm.te.PlaceholderOp): - if arg.op.name != "placeholder": - tensor_input_map[arg] = arg.op.name # Case 1: Check specific tensor inputs for func_name in TASK_INPUT_CHECK_FUNC_REGISTRY: func = TASK_INPUT_CHECK_FUNC_REGISTRY[func_name] - tensor_input_map.update(func(args)) + tensor_input_map |= func(args) return tensor_input_map @@ -851,16 +850,16 @@ def prepare_runner_args(inp, build_res): for arg in build_res.args: if arg in tensor_input_map: tensor_name = tensor_input_map[arg] - if tensor_name in task_input_names: - task_input_buffer = get_task_input_buffer(inp.task.workload_key, tensor_name) - # convert tvm.NDArray to picklable numpy.ndarray - args.append(task_input_buffer.numpy()) - task_inputs_count += 1 - else: + if tensor_name not in task_input_names: raise ValueError( - "%s not found in task_inputs, " % (tensor_name) + f"{tensor_name} not found in task_inputs, " + "should provide with `SearchTask(..., task_inputs={...})`" ) + + task_input_buffer = get_task_input_buffer(inp.task.workload_key, tensor_name) + # convert tvm.NDArray to picklable numpy.ndarray + args.append(task_input_buffer.numpy()) + task_inputs_count += 1 else: args.append(None) if task_inputs_count != len(task_input_names): @@ -1143,10 +1142,9 @@ def _rpc_run( # clean up remote files remote.remove(build_res.filename) - remote.remove(os.path.splitext(build_res.filename)[0] + ".so") + remote.remove(f"{os.path.splitext(build_res.filename)[0]}.so") remote.remove("") dev.free_raw_stream(stream) - # pylint: disable=broad-except except Exception: dev.free_raw_stream(stream) costs = (MAX_FLOAT,) diff --git a/python/tvm/auto_scheduler/measure_record.py b/python/tvm/auto_scheduler/measure_record.py index aaa8fdcd91..472aa47b4f 100644 --- a/python/tvm/auto_scheduler/measure_record.py +++ b/python/tvm/auto_scheduler/measure_record.py @@ -120,8 +120,9 @@ def read_lines(self, max_lines=None, skip_lines=0): to rebuild these fields. 
""" inputs, results = _ffi_api.RecordReaderReadLines( - self, max_lines if max_lines else -1, skip_lines + self, max_lines or -1, skip_lines ) + self.check_workload_key(inputs) return inputs, results @@ -353,7 +354,7 @@ def main(): logger.setLevel(logging.INFO) if args.mode == "distill": - args.output = args.output or args.input + ".best.json" + args.output = args.output or f"{args.input}.best.json" distill_record_file(args.input, args.output) diff --git a/python/tvm/auto_scheduler/relay_integration.py b/python/tvm/auto_scheduler/relay_integration.py index 9541232a6a..2e6b0e65f5 100644 --- a/python/tvm/auto_scheduler/relay_integration.py +++ b/python/tvm/auto_scheduler/relay_integration.py @@ -280,7 +280,10 @@ def traverse(t): if isinstance(t.op, PlaceholderOp): inputs.append(t) elif isinstance(t.op, ComputeOp): - has_complex_op = has_complex_op or any([isinstance(e, Reduce) for e in t.op.body]) + has_complex_op = has_complex_op or any( + isinstance(e, Reduce) for e in t.op.body + ) + if "layout_free_placeholders" in t.op.attrs: layout_free_ops.append(t.op) for x in t.op.input_tensors: @@ -293,7 +296,7 @@ def traverse(t): io_tensors = inputs + list(outs) for tensor in io_tensors: # Reject the compute if any of its I/O tensors has dynamic shape. - if any([not isinstance(v, int) for v in get_const_tuple(tensor.shape)]): + if any(not isinstance(v, int) for v in get_const_tuple(tensor.shape)): return ([], False, False) return (io_tensors, len(layout_free_ops) > 0, has_complex_op) @@ -356,8 +359,7 @@ def auto_schedule_topi(func_name, outs): # in the task extraction mode if has_complex_op or env.tracing_mode == TracingMode.EXTRACT_TASK: env.add_workload_key(func_name, key) - input_map = prepare_input_map(io_tensors) - if input_map: + if input_map := prepare_input_map(io_tensors): env.add_workload_input_names(key, list(input_map.values())) elif env.tracing_mode == TracingMode.PREPARE_LAYOUT_REWRITE: # in prepare_layout_rewrite mode @@ -374,7 +376,7 @@ def auto_schedule_topi(func_name, outs): if new_key != key: dispatch_ctx.update(target, new_key, state) else: - raise ValueError("Invalid tracing mode: " + env.tracing_mode) + raise ValueError(f"Invalid tracing mode: {env.tracing_mode}") return schedule @@ -457,9 +459,11 @@ def rewrite_compute_body(compute_tensor, new_layout): placeholder_op = layout_free_placeholders[0].op # Rewrite the index expression in body - body = [] - for b in op.body: - body.append(_ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b)) + body = [ + _ffi_api.RewriteIndexForNewLayout(placeholder_op, new_layout, b) + for b in op.body + ] + op_node = tvm.te._ffi_api.ComputeOp(op.name, op.tag, op.attrs, op.axis, body) num = op_node.num_outputs diff --git a/python/tvm/auto_scheduler/search_policy.py b/python/tvm/auto_scheduler/search_policy.py index a88c1305b5..81f79271c7 100644 --- a/python/tvm/auto_scheduler/search_policy.py +++ b/python/tvm/auto_scheduler/search_policy.py @@ -251,8 +251,7 @@ def sample_initial_population(self): states: List[State] The sampled states """ - states = _ffi_api.SketchPolicySampleInitialPopulation(self) - return states + return _ffi_api.SketchPolicySampleInitialPopulation(self) def evolutionary_search(self, init_populations, out_size): """Perform evolutionary search. 
@@ -271,5 +270,6 @@ def evolutionary_search(self, init_populations, out_size): states: List[State] The generated states """ - states = _ffi_api.SketchPolicyEvolutionarySearch(self, init_populations, out_size) - return states + return _ffi_api.SketchPolicyEvolutionarySearch( + self, init_populations, out_size + ) diff --git a/python/tvm/auto_scheduler/search_task.py b/python/tvm/auto_scheduler/search_task.py index ab03ff9f8e..5b0bd3bcbc 100644 --- a/python/tvm/auto_scheduler/search_task.py +++ b/python/tvm/auto_scheduler/search_task.py @@ -125,18 +125,7 @@ def __init__( def __str__(self): """Pretty printing for hardware parameter configuration.""" - format_str = ( - "HardwareParams:\n" - f" num_cores: {self.num_cores}\n" - f" vector_unit_bytes: {self.vector_unit_bytes}\n" - f" cache_line_bytes: {self.cache_line_bytes}\n" - f" max_shared_memory_per_block: {self.max_shared_memory_per_block}\n" - f" max_local_memory_per_block: {self.max_local_memory_per_block}\n" - f" max_threads_per_block: {self.max_threads_per_block}\n" - f" max_vthread_extent: {self.max_vthread_extent}\n" - f" warp_size: {self.warp_size}\n" - ) - return format_str + return ( + "HardwareParams:\n" + f" num_cores: {self.num_cores}\n" + f" vector_unit_bytes: {self.vector_unit_bytes}\n" + f" cache_line_bytes: {self.cache_line_bytes}\n" + f" max_shared_memory_per_block: {self.max_shared_memory_per_block}\n" + f" max_local_memory_per_block: {self.max_local_memory_per_block}\n" + f" max_threads_per_block: {self.max_threads_per_block}\n" + f" max_vthread_extent: {self.max_vthread_extent}\n" + f" warp_size: {self.warp_size}\n" + ) @tvm._ffi.register_object("auto_scheduler.TuningOptions") @@ -183,7 +172,7 @@ def __init__( if builder == "local": builder = LocalBuilder() else: - raise ValueError("Invalid builder: " + builder) + raise ValueError(f"Invalid builder: {builder}") elif not isinstance(builder, tvm.auto_scheduler.measure.ProgramBuilder): raise ValueError( "Invalid builder: " @@ -195,12 +184,13 @@ def __init__( if runner == "local": runner = LocalRunner() else: - raise ValueError("Invalid runner: " + runner) + raise ValueError(f"Invalid runner: {runner}") elif not isinstance(runner, tvm.auto_scheduler.measure.ProgramRunner): raise ValueError( - "Invalid runner: " + runner + " . TuningOptions expects a ProgramRunner or string." + f"Invalid runner: {runner}. TuningOptions expects a ProgramRunner or string." ) + self.__init_handle_by_constructor__( _ffi_api.TuningOptions, num_measure_trials, @@ -239,7 +229,7 @@ def _save_buffer_to_file(buffer_name, buffer_data): buffer_name += "." 
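# e.g. a float32 buffer registered under the name "weights" with shape (2, 3)
# comes out of the naming code below as "weights.2_3_float32.npy"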
for i in np_data.shape: buffer_name += "%d_" % (i) - buffer_name += "%s" % (np_data.dtype) + buffer_name += f"{np_data.dtype}" buffer_name += ".npy" np_data.tofile(buffer_name, " ") @@ -253,7 +243,7 @@ def _try_load_buffer_from_file(buffer_name): filelist = os.listdir() for file in filelist: - if file.startswith(buffer_name + "."): + if file.startswith(f"{buffer_name}."): meta_info = file.split(".")[-2].split("_") shape = [int(i) for i in meta_info[:-1]] dtype = meta_info[-1] @@ -305,16 +295,14 @@ def register_task_input_buffer( if not overwrite: if input_name not in input_table.keys(): - # Try to load buffer data from local file - tensor_from_file = _try_load_buffer_from_file(input_name) - if tensor_from_file: + if tensor_from_file := _try_load_buffer_from_file(input_name): input_table[input_name] = tensor_from_file elif input_name in input_table.keys(): raise RuntimeError( - "Tensor %s exists in TASK_INPUT_BUFFER_TABLE, %s" - % (input_name, "set overwrite to True or this Tensor will not be registered") + f"Tensor {input_name} exists in TASK_INPUT_BUFFER_TABLE, set overwrite to True or this Tensor will not be registered" ) + input_table[input_name] = input_data if save_to_file: _save_buffer_to_file(input_name, input_data) @@ -346,9 +334,7 @@ def get_task_input_buffer(workload_key, input_name): input_table = TASK_INPUT_BUFFER_TABLE[workload_key] if input_name not in input_table: - # Try to load buffer data from local file - tensor_from_file = _try_load_buffer_from_file(input_name) - if tensor_from_file: + if tensor_from_file := _try_load_buffer_from_file(input_name): input_table[input_name] = tensor_from_file # Then check for the default table, the input names extracted from a relay model will be @@ -360,8 +346,10 @@ def get_task_input_buffer(workload_key, input_name): return input_table[input_name] raise ValueError( - "%s not found in TASK_INPUT_BUFFER_TABLE, " % (input_name) - + "should provide with `SearchTask(..., task_inputs={...})`" + ( + f"{input_name} not found in TASK_INPUT_BUFFER_TABLE, " + + "should provide with `SearchTask(..., task_inputs={...})`" + ) ) @@ -519,9 +507,10 @@ def apply_best(self, log_file, include_compatible=False, layout_rewrite_option=N ) if inp is None: raise RuntimeError( - "Cannot find any valid schedule for %s in file %s" % (self.workload_key, log_file) + f"Cannot find any valid schedule for {self.workload_key} in file {log_file}" ) + sch, args = self.compute_dag.apply_steps_from_state( inp.state, layout_rewrite_option or self.layout_rewrite_option ) @@ -546,9 +535,10 @@ def print_best(self, log_file, print_mode="schedule"): inp, _ = load_best_record(log_file, self.workload_key) if inp is None: raise RuntimeError( - "Cannot find any valid schedule for %s in file %s" % (self.workload_key, log_file) + f"Cannot find any valid schedule for {self.workload_key} in file {log_file}" ) + if print_mode == "schedule": return self.compute_dag.print_python_code_from_state(inp.state) if print_mode == "cuda": @@ -556,7 +546,7 @@ def print_best(self, log_file, print_mode="schedule"): sch, args = self.compute_dag.apply_steps_from_state(inp.state) func = build(sch, args, "cuda") return func.imported_modules[0].get_source() - raise ValueError("Invalid print_mode: %s" % print_mode) + raise ValueError(f"Invalid print_mode: {print_mode}") def __getstate__(self): self.target, self.target_host = Target.canon_target_and_host(self.target, self.target_host) @@ -576,7 +566,7 @@ def __setstate__(self, state): try: workload = json.loads(state["workload_key"]) except Exception: # 
pylint: disable=broad-except - raise RuntimeError("Invalid workload key %s" % state["workload_key"]) + raise RuntimeError(f'Invalid workload key {state["workload_key"]}') # workload[0] is either the compute function name or the ComputeDAG hash. # The compute functions are already registered when importing TVM, so here diff --git a/python/tvm/auto_scheduler/task_scheduler.py b/python/tvm/auto_scheduler/task_scheduler.py index c23c9b3c0c..104a5c7afc 100644 --- a/python/tvm/auto_scheduler/task_scheduler.py +++ b/python/tvm/auto_scheduler/task_scheduler.py @@ -100,34 +100,31 @@ def make_search_policies( elif model_type == "random": cost_model = RandomModel() else: - raise ValueError("Invalid search policy: " + search_policy) + raise ValueError(f"Invalid search policy: {search_policy}") + + if policy_type != "sketch": + raise ValueError(f"Invalid search policy: {search_policy}") + init_search_callbacks = ( + [PreloadMeasuredStates(load_log_file)] if load_log_file else None + ) + + return [ + SketchPolicy( + task, + cost_model, + params=search_policy_params, + verbose=verbose, + init_search_callbacks=init_search_callbacks, + ) + for task in tasks + ] - if policy_type == "sketch": - if load_log_file: - # use the log file to restore the status of search policies. - init_search_callbacks = [PreloadMeasuredStates(load_log_file)] - else: - init_search_callbacks = None - search_policies = [ - SketchPolicy( - task, - cost_model, - params=search_policy_params, - verbose=verbose, - init_search_callbacks=init_search_callbacks, - ) - for task in tasks - ] - else: - raise ValueError("Invalid search policy: " + search_policy) else: # check type assert isinstance(search_policy, (tuple, list)) for item in search_policy: assert isinstance(item, SearchPolicy) - search_policies = search_policy - - return search_policies + return search_policy def derive_similarity_tag(dag, log_base=1.618): @@ -152,8 +149,7 @@ def derive_similarity_tag(dag, log_base=1.618): """ ret = "" for op in dag.ops: - tag = op.attrs.get("auto_scheduler_task_scheduler_tag", None) - if tag: + if tag := op.attrs.get("auto_scheduler_task_scheduler_tag", None): ret += op.attrs["auto_scheduler_task_scheduler_tag"] + "_" if ret: ret += "%d" % int(math.log(dag.flop_ct + 1, log_base)) @@ -221,11 +217,10 @@ def __init__( self.tasks = tasks if objective_func: # use custom objective function self.objective_func = objective_func - else: # use weighted sum - if task_weights: - self.objective_func = lambda costs: sum(c * w for c, w in zip(costs, task_weights)) - else: - self.objective_func = sum + elif task_weights: + self.objective_func = lambda costs: sum(c * w for c, w in zip(costs, task_weights)) + else: + self.objective_func = sum self.strategy = strategy self.load_log_file = load_log_file @@ -356,10 +351,9 @@ def tune( if not self.task_cts[idx]: self._tune_task(idx) self.best_ct = self.ct - self.best_score = self.cur_score - # use the specific strategy to choose workload to tune task_idx = -1 + self.best_score = self.cur_score while self.ct < tune_option.num_measure_trials and len(self.dead_tasks) < len(self.tasks): if self.strategy == "round-robin": task_idx = (task_idx + 1) % len(self.tasks) @@ -401,11 +395,10 @@ def tune( group_id = self.tag_to_group_id.get(self.task_tags[i], None) if group_id is not None and len(self.group_task_ids[group_id]) > 1: best_flops = max( - [ - self.flop_cts[j] / self.best_costs[j] - for j in self.group_task_ids[group_id] - ] + self.flop_cts[j] / self.best_costs[j] + for j in self.group_task_ids[group_id] ) + g_next_2 = 
self.beta * self.flop_cts[i] / best_flops g_next = min(g_next_1, g_next_2) @@ -423,7 +416,7 @@ def tune( else: task_idx = np.argmin(gradients) else: - raise ValueError("Invalid strategy: " + self.strategy) + raise ValueError(f"Invalid strategy: {self.strategy}") self._tune_task(task_idx) self._adjust_similarity_group(task_idx) @@ -436,10 +429,9 @@ def tune( ): if self.tune_option.verbose >= 1: print( - "Stop early since no performance improvement in the last " - + str(self.early_stopping_all) - + " measurement trials." + f"Stop early since no performance improvement in the last {str(self.early_stopping_all)} measurement trials." ) + break def _tune_task(self, task_idx): @@ -491,7 +483,10 @@ def _adjust_similarity_group(self, task_idx): return group_ids = self.group_task_ids[group_id] - best_group_flops = max([self.flop_cts[j] / self.best_costs[j] for j in group_ids]) + best_group_flops = max( + self.flop_cts[j] / self.best_costs[j] for j in group_ids + ) + cur_flops = self.flop_cts[task_idx] / self.best_costs[task_idx] # if we tune a task for many times but it still cannot achieve @@ -513,7 +508,7 @@ def _restore_status(self, log_file, num_measures_per_round): for total_ct, (inp, res) in enumerate(RecordReader(log_file)): if str(inp.task.target) != str_target: continue - task_idx = workload_key_to_task_id.get(inp.task.workload_key, None) + task_idx = workload_key_to_task_id.get(inp.task.workload_key) if task_idx is None: continue diff --git a/python/tvm/auto_scheduler/utils.py b/python/tvm/auto_scheduler/utils.py index 9919bcb470..c3511ace71 100644 --- a/python/tvm/auto_scheduler/utils.py +++ b/python/tvm/auto_scheduler/utils.py @@ -207,7 +207,7 @@ def serialize_args(args): elif isinstance(t, list): t = list_to_tuple(t) - assert isinstance(t, Hashable), str(t) + " is not hashable" + assert isinstance(t, Hashable), f"{str(t)} is not hashable" ret.append(t) return tuple(ret) @@ -327,8 +327,7 @@ def request_remote(device_key, host=None, port=None, priority=1, timeout=60): port = port or int(os.environ["TVM_TRACKER_PORT"]) tracker = rpc.connect_tracker(host, port) - remote = tracker.request(device_key, priority=priority, session_timeout=timeout) - return remote + return tracker.request(device_key, priority=priority, session_timeout=timeout) def check_remote(device_key, host=None, port=None, priority=100, timeout=10): @@ -409,4 +408,4 @@ def to_str_round(x, decimal=6): if isinstance(x, (np.float32, np.float64, float)): format_str = "%%.%df" % decimal return format_str % x - raise ValueError("Invalid value: " + str(x) + "\ttype: " + str(type(x))) + raise ValueError(f"Invalid value: {str(x)}" + "\ttype: " + str(type(x))) diff --git a/python/tvm/auto_scheduler/workload_registry.py b/python/tvm/auto_scheduler/workload_registry.py index 17d2001e3a..58ad00188a 100644 --- a/python/tvm/auto_scheduler/workload_registry.py +++ b/python/tvm/auto_scheduler/workload_registry.py @@ -91,13 +91,11 @@ def matmul(N, M, K): def register(myf): """internal register function""" if func_name in WORKLOAD_FUNC_REGISTRY and not override: - raise RuntimeError("%s has been registered already" % func_name) + raise RuntimeError(f"{func_name} has been registered already") WORKLOAD_FUNC_REGISTRY[func_name] = myf return myf - if f: - return register(f) - return register + return register(f) if f else register def register_workload_tensors(workload_key, tensors, override=True): @@ -151,12 +149,13 @@ def make_workload_key(func, args): + " . 
`make_workload_key` expects a callable function or its function name" ) - if not func_name in WORKLOAD_FUNC_REGISTRY: + if func_name not in WORKLOAD_FUNC_REGISTRY: raise ValueError( - "%s is not registered. " % func, + f"{func} is not registered. ", "Please register it with @auto_scheduler.register_workload", ) + args = serialize_args(args) return json.dumps((func_name,) + args) diff --git a/python/tvm/autotvm/database.py b/python/tvm/autotvm/database.py index 4c4ed64923..86bc9fece0 100644 --- a/python/tvm/autotvm/database.py +++ b/python/tvm/autotvm/database.py @@ -82,8 +82,8 @@ def filter_inputs(db, measure_inputs, retry=False): unsaved: Array of MeasureInput a list that only contains unsaved inputs """ - partial_results = list() - unsaved = list() + partial_results = [] + unsaved = [] for inp in measure_inputs: res = db.load(inp) if res is None or (retry and res.error_no != 0): @@ -164,7 +164,7 @@ def filter(self, func): get records with errors >>> db.filter(lambda inp, results: any(r.error_no != 0 for r in results)) """ - matched_records = list() + matched_records = [] # may consider filtering in iterator in the future for key in self.db.keys(): current = self.get(key) diff --git a/python/tvm/autotvm/feature.py b/python/tvm/autotvm/feature.py index f73c65fbd1..a0ed7177b2 100644 --- a/python/tvm/autotvm/feature.py +++ b/python/tvm/autotvm/feature.py @@ -89,10 +89,8 @@ def get_itervar_feature(sch, args, take_log=False): # convert tvm node to python type ret = [] for row in feas: - tmp = [] - tmp.append([row[0][0].value, row[0][1]]) - for item in row[1:]: - tmp.append([item[0].value] + [x.value for x in item[1:]]) + tmp = [[row[0][0].value, row[0][1]]] + tmp.extend([item[0].value] + [x.value for x in item[1:]] for item in row[1:]) ret.append(tmp) return ret @@ -112,8 +110,7 @@ def flatten_itervar_feature(fea): """ flatten = [] for axis in fea: - for pair in axis[1:]: - flatten.append(pair[1:]) + flatten.extend(pair[1:] for pair in axis[1:]) return np.concatenate(flatten) @@ -181,11 +178,7 @@ def get_flatten_name(fea): var_name = str(row[0][1]) for pair in row[1:]: key = pair[0] - if key in feature_name: - name_list = feature_name[key] - else: - name_list = feature_name["buf_touch"] - + name_list = feature_name.get(key, feature_name["buf_touch"]) for i in range(len((pair[1:]))): names.append(".".join(["f%d" % ct, var_name, key, name_list[i]])) ct += 1 diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py index d4054bbd37..01a6451a03 100644 --- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py +++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py @@ -49,7 +49,7 @@ def get_infer_layout(task_name): return topi.nn.conv2d_infer_layout if task_name.startswith("depthwise_conv2d"): return topi.nn.depthwise_conv2d_infer_layout - raise ValueError("Cannot find infer layout for task %s" % task_name) + raise ValueError(f"Cannot find infer layout for task {task_name}") @autotvm.template("layout_transform") @@ -137,12 +137,12 @@ def __init__( # Set up logger self._verbose = verbose - self._logger = logging.getLogger(name + "_logger") + self._logger = logging.getLogger(f"{name}_logger") need_file_handler = need_console_handler = True for handler in self._logger.handlers: if handler.__class__.__name__ == "FileHandler": need_file_handler = False - if handler.__class__.__name__ == "StreamHandler": + elif handler.__class__.__name__ == "StreamHandler": need_console_handler = False self._log_level = log_level self._log_file = log_file @@ 
-163,20 +163,19 @@ def __init__( if isinstance(graph, tvm.IRModule): graph = graph["main"] - if isinstance(graph, relay.function.Function): - node_dict = {} - graph = bind_inputs(graph, input_shapes, dtype) - expr2graph(graph, self._target_ops, node_dict, self._node_list, target) - else: - raise RuntimeError("Unsupported graph type: %s" % str(type(graph))) + if not isinstance(graph, relay.function.Function): + raise RuntimeError(f"Unsupported graph type: {str(type(graph))}") + node_dict = {} + graph = bind_inputs(graph, input_shapes, dtype) + expr2graph(graph, self._target_ops, node_dict, self._node_list, target) self._graph = graph self._in_nodes_dict = get_in_nodes(self._node_list, self._target_ops, input_shapes.keys()) if len(self._in_nodes_dict) == 0: raise RuntimeError( - "Could not find any input nodes with whose " "operator is one of %s" % self._target_ops + f"Could not find any input nodes whose operator is one of {self._target_ops}" ) + self._out_nodes_dict = get_out_nodes(self._in_nodes_dict) self._fetch_cfg() self._opt_out_op = OPT_OUT_OP @@ -202,7 +201,7 @@ def __init__( input_workload = input_node["workloads"][0] first_tensor = input_workload[1] dtype = first_tensor[-1] - new_shape = tuple([val.value for val in node_entry["types"][0].shape]) + new_shape = tuple(val.value for val in node_entry["types"][0].shape) actual_workload = ( (input_workload[0],) + (("TENSOR", new_shape, dtype),) @@ -238,7 +237,6 @@ def _fetch_cfg(self): if workload in cache_dict: node_entry["record_candidates"] = cache_dict[workload] continue - record_candidates = [] infer_layout_func = get_infer_layout(node_entry["topi_op"][0]) layout_tracking_dict = {} for record in cfg_dict[workload]: @@ -259,8 +257,11 @@ def _fetch_cfg(self): sorted_records = sorted( layout_tracking_dict.values(), key=lambda item: item[1].costs[0] ) - for i in range(min(self._max_sch_num, len(sorted_records))): - record_candidates.append(sorted_records[i]) + record_candidates = [ + sorted_records[i] + for i in range(min(self._max_sch_num, len(sorted_records))) + ] + node_entry["record_candidates"] = record_candidates cache_dict[workload] = record_candidates @@ -519,11 +520,7 @@ def _callback(_, inputs, results): for i in topi.utils.get_const_tuple(out.shape): out_flops *= i - if flops != out_flops: - inferred_time = INVALID_LAYOUT_TIME - else: - inferred_time = flops * avg_time - + inferred_time = INVALID_LAYOUT_TIME if flops != out_flops else flops * avg_time record_input = MeasureInput(target=self._target, task=None, config=None) record_output = MeasureResult( costs=(inferred_time,), error_no=0, all_cost=-1, timestamp=-1 @@ -583,7 +580,7 @@ def write_opt_sch2record_file(self, record_file="graph_opt_schedule.log"): records = self.get_optimal_records() for record in records: out_file.write(encode(record[0], record[1]) + "\n") - msg = "Writing optimal schedules to %s successfully." % record_file + msg = f"Writing optimal schedules to {record_file} successfully." 
self._logger.info(msg) @abstractmethod diff --git a/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py b/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py index 2d7560272e..711dd79705 100644 --- a/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py +++ b/python/tvm/autotvm/graph_tuner/dynamic_programming_stage.py @@ -184,9 +184,7 @@ def _create_op_states(self): for child in self._global_out_nodes_dict[self._idx]: self._global_dep_dict[dep_idx].add(child) if len(self._global_out_nodes_dict[self._idx]) > 1: - self._global_dep_dict[self._idx] = set() - for child in self._global_out_nodes_dict[self._idx]: - self._global_dep_dict[self._idx].add(child) + self._global_dep_dict[self._idx] = set(self._global_out_nodes_dict[self._idx]) def _create_multi_inputs_states(self): """State creation routine for multi_input operator @@ -235,11 +233,7 @@ def _create_multi_inputs_states(self): target_sch_idx = ( i % (target_multiplier * aligned_shape[target_major_axis]) ) // target_multiplier - if node_time_counted[0]: - new_state = 0 - else: - new_state = target_states[i] - + new_state = 0 if node_time_counted[0] else target_states[i] for j in range(1, len(states_list)): src_states = src_states_list[j - 1] src_node_idx, src_major_axis, src_multiplier, _ = states_list[j] @@ -287,9 +281,7 @@ def _create_multi_inputs_states(self): for child in self._global_out_nodes_dict[self._idx]: self._global_dep_dict[dep].add(child) if len(self._global_out_nodes_dict[self._idx]) > 1: - self._global_dep_dict[self._idx] = set() - for child in self._global_out_nodes_dict[self._idx]: - self._global_dep_dict[self._idx].add(child) + self._global_dep_dict[self._idx] = set(self._global_out_nodes_dict[self._idx]) @property def dep(self): @@ -343,9 +335,10 @@ def align_states(input_index_list, stage_dict, node_list): for dep_idx in input_node_stage.dep: if dep_idx not in aligned_node_list: aligned_node_list.append(dep_idx) - aligned_shape = [] - for idx in aligned_node_list: - aligned_shape.append(len(node_list[idx]["record_candidates"])) + aligned_shape = [ + len(node_list[idx]["record_candidates"]) for idx in aligned_node_list + ] + for input_idx in input_index_list: input_node_stage = stage_dict[input_idx] input_node_shape_idx_list = [input_idx] + input_node_stage.dep diff --git a/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py b/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py index 97253e406b..c7affe18a6 100644 --- a/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py +++ b/python/tvm/autotvm/graph_tuner/dynamic_programming_tuner.py @@ -62,13 +62,15 @@ def __init__(self, *args, **kwargs): def _check_num_states(self, num_states): """Track the number of states.""" self._num_states += num_states - if self._max_num_states is not None: - if self._num_states > self._max_num_states: - raise RuntimeError( - "Too many states detected while running dynamic " - "programming: got %d states but upper limit is %d." - % (self._num_states, self._max_num_states) - ) + if ( + self._max_num_states is not None + and self._num_states > self._max_num_states + ): + raise RuntimeError( + "Too many states detected while running dynamic " + "programming: got %d states but upper limit is %d." 
+ % (self._num_states, self._max_num_states) ) def _forward(self): """Forward pass in DP to generate states for all stages.""" @@ -83,22 +85,12 @@ def _backward(self): """Backward pass in DP to generate optimal solution.""" self._logger.info("Start backward pass...") input_names = self._input_shapes.keys() - optimal_record_dict = {} # Pick optimal schedule for output nodes - output_idx_list = [] - for key, val in self._out_nodes_dict.items(): - if not val: - output_idx_list.append(key) + output_idx_list = [key for key, val in self._out_nodes_dict.items() if not val] # Restrict number of output nodes to avoid numpy reshape error if len(output_idx_list) > MAX_OUTPUT_NODES: - msg = ( - "The number of outputs in graph is larger than upper " - "limit: %s vs %s. Usually this is caused by too many " - "LAYOUT_FIXED_OP in graph. Switch to greedily select schedule." - "No action required at this moment. We will continuously improve graph tuner" - % (len(output_idx_list), MAX_OUTPUT_NODES) - ) + msg = ( + "The number of outputs in graph is larger than upper " + f"limit: {len(output_idx_list)} vs {MAX_OUTPUT_NODES}. Usually this is caused by too many " + "LAYOUT_FIXED_OP in graph. Switch to greedily select schedule. " + "No action required at this moment. We will continuously improve graph tuner" + ) + self._logger.warning(msg) self._optimal_record_dict = {key: 0 for key in self._in_nodes_dict} return @@ -109,18 +101,19 @@ def _backward(self): num_states = states_list[0][3].size self._check_num_states(num_states * len(output_idx_list)) aligned_node_shape = states_list[0][3].shape - min_time = 0 min_pos = -1 - for states in states_list: - min_time += np.amax(states[3]) + min_time = sum(np.amax(states[3]) for states in states_list) flatten_states_list = [current_states[3].flatten() for current_states in states_list] for i in range(num_states): - current_time = 0 - for j, current_states in enumerate(states_list): - current_time += flatten_states_list[j][i] + current_time = sum( + flatten_states_list[j][i] + for j, current_states in enumerate(states_list) + ) + if min_time > current_time: min_time = current_time min_pos = i + optimal_record_dict = {} for i, states in enumerate(states_list): current_major_axis = states[1] current_sch_idx = ( @@ -158,7 +151,7 @@ def _backward(self): bfs_q.put(input_idx) if input_idx not in optimal_record_dict: dep_list = self._stage_dict[node_idx].dep - dep_idx = tuple([optimal_record_dict[item] for item in dep_list]) + dep_idx = tuple(optimal_record_dict[item] for item in dep_list) tmp = np.argmin(full_states, axis=1) optimal_input_sch_idx = tmp[(optimal_sch_idx,) + dep_idx] optimal_record_dict[input_idx] = optimal_input_sch_idx @@ -178,7 +171,7 @@ def _backward(self): new_states_pos.append(i - 1) if visited_states_idx: tmp = np.transpose(tmp, tuple(visited_states_pos + new_states_pos)) - tmp = tmp[tuple([optimal_record_dict[idx] for idx in visited_states_idx])] + tmp = tmp[tuple(optimal_record_dict[idx] for idx in visited_states_idx)] min_pos = np.argmin(tmp) multiplier = 1 for i in range(len(new_states_idx)): diff --git a/python/tvm/autotvm/graph_tuner/pbqp_tuner.py b/python/tvm/autotvm/graph_tuner/pbqp_tuner.py index 59f4ef0879..bf4b8ded29 100644 --- a/python/tvm/autotvm/graph_tuner/pbqp_tuner.py +++ b/python/tvm/autotvm/graph_tuner/pbqp_tuner.py @@ -44,17 +44,19 @@ def __init__(self, *args, **kwargs): for out_node_idx in self._out_nodes_dict[node_idx]: self._in_nodes_dict[out_node_idx].remove(node_idx) - self._adj_dict = {} - for node_idx in self._in_nodes_dict: 
self._adj_dict[node_idx] = list(self._in_nodes_dict[node_idx]) + list( - self._out_nodes_dict[node_idx] - ) - - self._record_cost_dict = {} - for key in self._in_nodes_dict: - self._record_cost_dict[key] = [] - for record in self._node_list[key]["record_candidates"]: - self._record_cost_dict[key].append(record[1].costs[0]) + self._adj_dict = { + node_idx: list(self._in_nodes_dict[node_idx]) + + list(self._out_nodes_dict[node_idx]) + for node_idx in self._in_nodes_dict + } + + self._record_cost_dict = { + key: [ + record[1].costs[0] + for record in self._node_list[key]["record_candidates"] + ] + for key in self._in_nodes_dict + } self._max_degree = -1 self._node_degree_dict = {} @@ -129,7 +131,7 @@ def _RII_reduction(self, node_idx): ltf_matrix_y = self._layout_transform_interlayer_cost[(adj_node_y, node_idx)] delta_matrix = [[] for _ in range(len(ltf_matrix_x))] for i, cost_vec_x in enumerate(ltf_matrix_x): - for j, cost_vec_y in enumerate(ltf_matrix_y): + for cost_vec_y in ltf_matrix_y: min_cost = INVALID_LAYOUT_TIME for k in range(len(self._record_cost_dict[node_idx])): min_cost = min( @@ -284,5 +286,5 @@ def run(self, **kwargs): self._forward() self._backward() is_optimal = "optimal" if self._is_optimal else "sub-optimal" - msg = "Finished PBQPExecutor run. Got %s solution." % is_optimal + msg = f"Finished PBQPExecutor run. Got {is_optimal} solution." self._logger.info(msg) diff --git a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py index 1f1e4edcd9..ac8e8e9efe 100644 --- a/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py +++ b/python/tvm/autotvm/graph_tuner/utils/traverse_graph.py @@ -89,7 +89,7 @@ def _replace_device_with_tracing(target): target = str(target) if "-device" in target: return re.sub("-device=[^\\-$]+", "-device=tracing ", target).strip(" ") - return target + " -device=tracing" + return f"{target} -device=tracing" def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target): @@ -119,9 +119,10 @@ def _traverse_expr(node): node_entry["types"].append(tupe_type) else: raise RuntimeError( - "Unsupported output type %s in operator %s" % (type(out_type), op.name) + f"Unsupported output type {type(out_type)} in operator {op.name}" ) + # Utilize tracing target to fetch workload with topo-order. # Since we only need workload, dummy target can be used to # create task. @@ -168,14 +169,15 @@ def _traverse_expr(node): else: node_entry["inputs"].append([in_node_idx, 0, 0]) elif isinstance(node, Constant): - node_entry["name"] = "Constant_" + str(node_index) + node_entry["name"] = f"Constant_{node_index}" node_entry["types"] = [node.checked_type] elif isinstance(node, tvm.ir.Op): return else: raise RuntimeError( - "Not supported relay node type in graph tuning: %s" % str(type(node)) + f"Not supported relay node type in graph tuning: {str(type(node))}" ) + node_dict[node] = node_index node_list.append(node_entry) @@ -292,11 +294,9 @@ def get_in_nodes(node_list, target_ops, input_names): # Remove empty nodes to ignore pre-computed sub-graph has_empty_node = True while has_empty_node: - empty_nodes = [] - for key, val in in_node_dict.items(): - if not val: - empty_nodes.append(key) - if empty_nodes: + if empty_nodes := [ + key for key, val in in_node_dict.items() if not val + ]: has_empty_node = True for node in empty_nodes: del in_node_dict[node] @@ -323,9 +323,7 @@ def get_out_nodes(in_node_dict): out : dict of int to list of int Dictionary maps node index to closest output nodes. 
""" - out_node_dict = {} - for key in in_node_dict: - out_node_dict[key] = [] + out_node_dict = {key: [] for key in in_node_dict} for key, val in in_node_dict.items(): for item in val: if item in out_node_dict: diff --git a/python/tvm/autotvm/graph_tuner/utils/utils.py b/python/tvm/autotvm/graph_tuner/utils/utils.py index 54e0d1cb36..b563a03566 100644 --- a/python/tvm/autotvm/graph_tuner/utils/utils.py +++ b/python/tvm/autotvm/graph_tuner/utils/utils.py @@ -91,10 +91,9 @@ def is_boundary_node(node_entry, input_names): ) ] - out = node_entry["op"] in _LAYOUT_FIXED_OP or ( + return node_entry["op"] in _LAYOUT_FIXED_OP or ( "name" in node_entry and node_entry["name"] in input_names ) - return out def is_skipped_node(node_entry): @@ -146,10 +145,12 @@ def bind_inputs(expr, input_shapes=None, input_dtypes="float32"): ) updated_input_dict[input_name] = updated_input - rebind_dict = {} - for var in expr.params: - if var.name_hint in updated_input_dict: - rebind_dict[var] = updated_input_dict[var.name_hint] + rebind_dict = { + var: updated_input_dict[var.name_hint] + for var in expr.params + if var.name_hint in updated_input_dict + } + updated_expr = relay.expr.bind(expr, rebind_dict) mod = tvm.IRModule.from_expr(updated_expr) diff --git a/python/tvm/autotvm/measure/measure.py b/python/tvm/autotvm/measure/measure.py index c9b82cd81c..f67b1347c0 100644 --- a/python/tvm/autotvm/measure/measure.py +++ b/python/tvm/autotvm/measure/measure.py @@ -244,21 +244,19 @@ def measure_option(builder, runner): if builder == "local": builder = LocalBuilder() else: - raise ValueError("Invalid builder: " + builder) + raise ValueError(f"Invalid builder: {builder}") if isinstance(runner, str): if runner == "local": runner = LocalRunner() else: - raise ValueError("Invalid runner: " + runner) + raise ValueError(f"Invalid runner: {runner}") - opt = { + return { "builder": builder, "runner": runner, } - return opt - def create_measure_batch(task, option): """Get a standard measure_batch function. 
diff --git a/python/tvm/autotvm/measure/measure_methods.py b/python/tvm/autotvm/measure/measure_methods.py index 8fc0da89c4..f5ae05c489 100644 --- a/python/tvm/autotvm/measure/measure_methods.py +++ b/python/tvm/autotvm/measure/measure_methods.py @@ -113,7 +113,7 @@ def __init__( elif build_func == "stackvm": build_func = stackvm.build else: - raise ValueError("Invalid build_func" + build_func) + raise ValueError(f"Invalid build_func: {build_func}") self.build_func = _WrappedBuildFunc(build_func, runtime) if not do_fork: assert n_parallel in ( @@ -158,33 +158,30 @@ def build(self, measure_inputs): time.time(), ) - else: - if "InstantiationError" in str(exception): - msg = str(exception) - try: - msg = msg.split("\n")[-2].split(": ")[1] - except Exception: # pylint: disable=broad-except - pass - res = MeasureResult( - ( - tb, - InstantiationError(msg), - ), - MeasureErrorNo.INSTANTIATION_ERROR, - res.time_cost, - time.time(), - ) - - else: # tvm error - res = MeasureResult( - ( - tb, - res.error, - ), - MeasureErrorNo.COMPILE_HOST, - res.time_cost, - time.time(), - ) + elif "InstantiationError" in str(exception): + msg = str(exception) + import contextlib # assumed missing from this module's imports; needed for suppress() + with contextlib.suppress(Exception): + msg = msg.split("\n")[-2].split(": ")[1] + res = MeasureResult( + ( + tb, + InstantiationError(msg), + ), + MeasureErrorNo.INSTANTIATION_ERROR, + res.time_cost, + time.time(), + ) + + else: # tvm error + res = MeasureResult( + ( + tb, + res.error, + ), + MeasureErrorNo.COMPILE_HOST, + res.time_cost, + time.time(), + ) except TimeoutError as ex: tb = traceback.format_exc() res = MeasureResult( @@ -520,7 +517,7 @@ def _build_func_common(measure_input, runtime=None, check_gpu=None, build_option ) current_config = dict(current_pass_context.config) if build_option is not None: - current_config.update(build_option) + current_config |= build_option if "tir.add_lower_pass" in current_config: current_add_lower_pass = list(current_config["tir.add_lower_pass"]) @@ -702,8 +699,7 @@ def run_through_rpc( costs = time_f(*args).results if len(costs) > 2: # remove largest and smallest value to reduce variance - costs = list(costs) - costs.sort() + costs = sorted(costs) costs = tuple(costs[1:-1]) except TVMError as exc: msg = str(exc) @@ -740,7 +736,7 @@ def __call__(self, remote_kwargs, build_result): finally: # clean up remote files remote.remove(build_result.filename) - remote.remove(os.path.splitext(build_result.filename)[0] + ".so") + remote.remove(f"{os.path.splitext(build_result.filename)[0]}.so") remote.remove("") @@ -791,8 +787,7 @@ def request_remote(device_key, host=None, port=None, priority=1, timeout=60): port = port or int(os.environ["TVM_TRACKER_PORT"]) tracker = _rpc.connect_tracker(host, port) - remote = tracker.request(device_key, priority=priority, session_timeout=timeout) - return remote + return tracker.request(device_key, priority=priority, session_timeout=timeout) def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10): diff --git a/python/tvm/autotvm/record.py b/python/tvm/autotvm/record.py index b2faee243b..b1cd74b893 100644 --- a/python/tvm/autotvm/record.py +++ b/python/tvm/autotvm/record.py @@ -119,7 +119,7 @@ def encode(inp, result, protocol="json"): ) return "\t".join(row) - raise RuntimeError("Invalid log protocol: " + protocol) + raise RuntimeError(f"Invalid log protocol: {protocol}") def decode(row, protocol="json"): @@ -154,19 +154,17 @@ def decode(row, protocol="json"): if "-target" in tgt: logger.warning('"-target" is deprecated, use "-mtriple" instead.') tgt = 
tgt.replace("-target", "-mtriple") - tgt = Target(str(tgt)) + tgt = Target(tgt) def clean_json_to_python(x): """1. Convert all list in x to tuple (hashable) 2. Convert unicode to str for python2 """ if isinstance(x, list): - return tuple([clean_json_to_python(a) for a in x]) + return tuple(clean_json_to_python(a) for a in x) if isinstance(x, _unicode): return str(x) - if isinstance(x, (_long, int)): - return int(x) - return x + return int(x) if isinstance(x, (_long, int)) else x tsk = task.Task(clean_json_to_python(task_name), clean_json_to_python(task_args)) config = ConfigEntity.from_json_dict(row["config"]) @@ -191,7 +189,7 @@ def clean_json_to_python(x): tsk = task.Task(task_tuple[0], task_tuple[1]) return MeasureInput(tgt, tsk, config), result - raise RuntimeError("Invalid log protocol: " + protocol) + raise RuntimeError(f"Invalid log protocol: {protocol}") def load_from_file(filename): @@ -287,10 +285,7 @@ def pick_best(in_file, out_file): context = itertools.chain(context, out_context) context, context_clone = itertools.tee(context) best_context = ApplyHistoryBest(context) - best_set = set() - - for v in best_context.best_by_model.values(): - best_set.add(measure_str_key(v[0])) + best_set = {measure_str_key(v[0]) for v in best_context.best_by_model.values()} for v in best_context.best_by_targetkey.values(): best_set.add(measure_str_key(v[0])) @@ -317,6 +312,7 @@ def pick_best(in_file, out_file): * Split a log file into separate files, each of which contains only a single wkl e.g. python -m tvm.autotvm.record --mode split --i collect.log """ + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--mode", choices=["read", "pick", "split"], default="read") @@ -331,7 +327,7 @@ def pick_best(in_file, out_file): logging.basicConfig(level=logging.INFO) if args.mode == "pick": - args.o = args.o or args.i + ".best.log" + args.o = args.o or f"{args.i}.best.log" pick_best(args.i, args.o) elif args.mode == "read": for i, (inp, result) in enumerate(load_from_file(args.i)): diff --git a/python/tvm/autotvm/task/dispatcher.py b/python/tvm/autotvm/task/dispatcher.py index 11a608d4cb..a242d6afef 100644 --- a/python/tvm/autotvm/task/dispatcher.py +++ b/python/tvm/autotvm/task/dispatcher.py @@ -201,7 +201,7 @@ def __init__(self, tasks, schedule_names: Union[str, typing.List[str]]): elif isinstance(schedule_names, list): self._schedule_names = schedule_names else: - raise RuntimeError("Incorrect type: " + schedule_names) + raise RuntimeError(f"Incorrect type: {schedule_names}") self._tasks = tasks self.workload = None @@ -217,13 +217,11 @@ def _query_inside(self, target, workload): if not config: raise RuntimeError( - "workload: %s does not exist in %s" % (str(workload), str(self._tasks)) + f"workload: {str(workload)} does not exist in {str(self._tasks)}" ) + # Add low cost to the target schedule and high cost to others. 
- if workload[0] in self._schedule_names: - config.cost = 1e-6 - else: - config.cost = 100000 + config.cost = 1e-6 if workload[0] in self._schedule_names else 100000 return config def update(self, target, workload, cfg): @@ -284,9 +282,8 @@ def load(self, records): if isinstance(rec, str): rec = load_from_file(rec) joint_records += rec - else: - if rec is not None: - joint_records.append(rec) + elif rec is not None: + joint_records.append(rec) if not joint_records: return @@ -312,14 +309,13 @@ def load(self, records): # use model as key to build best map key = (inp.target.model, inp.task.workload) - if key not in best_by_model: - if inp.target.model != "unknown": - best_by_model[key] = (inp, res) - else: + if key in best_by_model: _, other_res = best_by_model[key] if np.mean(other_res.costs) > np.mean(res.costs): best_by_model[key] = (inp, res) + elif inp.target.model != "unknown": + best_by_model[key] = (inp, res) logger.debug("Finish loading %d records", counter) def _query_inside(self, target, workload): diff --git a/python/tvm/autotvm/task/space.py b/python/tvm/autotvm/task/space.py index 4d6b23162a..48917c86c3 100644 --- a/python/tvm/autotvm/task/space.py +++ b/python/tvm/autotvm/task/space.py @@ -131,21 +131,18 @@ def __init__(self, var, name=None): self.length = var elif isinstance(var, schedule.IterVar): self.name = var.var.name - if var.dom is None: - self.length = -1 - else: - self.length = get_const_int(var.dom.extent) + self.length = -1 if var.dom is None else get_const_int(var.dom.extent) elif isinstance(var, VirtualAxis): self.length = var.length else: - raise RuntimeError("Invalid type of axis: " + str(type(var))) + raise RuntimeError(f"Invalid type of axis: {str(type(var))}") @staticmethod def get_num_output(var, name=None): return 1 def __repr__(self): - return "vaxis(%s)" % self.name + return f"vaxis({self.name})" def get_factors(n): @@ -162,16 +159,18 @@ def get_factors(n): List of all factors """ step = 2 if n % 2 else 1 - ret = list( + return sorted( set( functools.reduce( list.__add__, - ([i, n // i] for i in range(1, int(math.sqrt(n)) + 1, step) if n % i == 0), + ( + [i, n // i] + for i in range(1, int(math.sqrt(n)) + 1, step) + if n % i == 0 + ), ) ) ) - ret.sort() - return ret def get_pow2s(n): @@ -223,7 +222,7 @@ def __init__(self, axes, policy, **kwargs): # Include less, equal, and round-up power-of-two numbers. May generate tails. 
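# e.g. product=12, max_factor=8: get_pow2s(12) presumably yields [1, 2, 4, 8, 16],
# the filter keeps [1, 2, 4, 8], and since 8 does not divide 12 a tail can remain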
factors = [x for x in get_pow2s(self.product) if x <= max_factor] else: - raise RuntimeError("Invalid policy: %s" % policy) + raise RuntimeError(f"Invalid policy: {policy}") # Enforce the product of all split factors equals to the axis length no_tail = kwargs.get("no_tail", policy == "factors") @@ -353,7 +352,7 @@ def __init__(self, axes, policy, **kwargs): for r in reduce_merged: self.entities.append(ReorderEntity(o + r + inner_merged)) else: - raise RuntimeError("Invalid policy: " + policy) + raise RuntimeError(f"Invalid policy: {policy}") @staticmethod def get_num_output(axes, policy, **kwargs): @@ -520,7 +519,7 @@ def __init__(self, axes, policy, **kwargs): for ann in anns: if ann not in ["none", "unroll", "vec"]: - raise RuntimeError("Invalid policy: " + policy) + raise RuntimeError(f"Invalid policy: {policy}") self.num_axis = len(axes) self.anns = [anns] * self.num_axis @@ -621,7 +620,7 @@ def apply( assert i < len(axes) - 1 axes[i + 1] = sch[op].fuse(axes[i], axes[i + 1]) else: - raise RuntimeError("Invalid annotation " + ann) + raise RuntimeError(f"Invalid annotation {ann}") return axes def __repr__(self): @@ -852,14 +851,13 @@ def get(self, index): index in the space """ if index < 0 or index >= len(self): - raise IndexError("Index out of range: size {}, got index {}".format(len(self), index)) + raise IndexError(f"Index out of range: size {len(self)}, got index {index}") entities = OrderedDict() t = index for name, space in self.space_map.items(): entities[name] = space[t % len(space)] t //= len(space) - ret = ConfigEntity(index, self.code_hash, entities, self._constraints) - return ret + return ConfigEntity(index, self.code_hash, entities, self._constraints) def __iter__(self): return self._entity_map.__iter__() @@ -879,7 +877,7 @@ def __repr__(self): res = "ConfigSpace (len=%d, space_map=\n" % len(self) for i, (name, space) in enumerate(self.space_map.items()): res += " %2d %s: %s\n" % (i, name, space) - return res + ")" + return f"{res})" _ann_to_number = { @@ -963,9 +961,7 @@ def to_json_dict(self): json_dict: dict a json serializable dictionary """ - ret = {} - ret["index"] = int(self.index) - ret["code_hash"] = self.code_hash + ret = {"index": int(self.index), "code_hash": self.code_hash} entity_map = [] for k, v in self._entity_map.items(): if isinstance(v, SplitEntity): @@ -977,7 +973,7 @@ def to_json_dict(self): elif isinstance(v, OtherOptionEntity): entity_map.append((k, "ot", v.val)) else: - raise RuntimeError("Invalid entity instance: " + v) + raise RuntimeError(f"Invalid entity instance: {v}") ret["entity"] = entity_map return ret @@ -1013,7 +1009,7 @@ def from_json_dict(json_dict): elif knob_type == "ot": entity = OtherOptionEntity(knob_args) else: - raise RuntimeError("Invalid config knob type: " + knob_type) + raise RuntimeError(f"Invalid config knob type: {knob_type}") entity_map[str(key)] = entity return ConfigEntity(index, code_hash, entity_map, constraints) @@ -1060,17 +1056,18 @@ def fallback_split(self, name, constraints): for i in reversed(range(space.num_output)): factors = get_factors(now) - find = len(factors) - 1 - for j, f in enumerate(factors): - if f > constraints[i]: - find = j - 1 - break + find = next( + (j - 1 for j, f in enumerate(factors) if f > constraints[i]), + len(factors) - 1, + ) if find >= 0: entity.size[i] = factors[find] now //= factors[find] else: - raise RuntimeError("Cannot find feasible fallback split entity for node: " + name) + raise RuntimeError( + f"Cannot find feasible fallback split entity for node: {name}" + ) def 
fallback_with_reference_log(self, ref_log): """A data driven fallback mechanism. @@ -1089,9 +1086,10 @@ def fallback_with_reference_log(self, ref_log): knob_names = [x for x in self.space_map.keys() if isinstance(self.space_map[x], SplitSpace)] # find best match config in reference data by matching tiling factors - factor_list = [] - for knob_name in knob_names: - factor_list.append(get_factors(self.space_map[knob_name].product)) + factor_list = [ + get_factors(self.space_map[knob_name].product) + for knob_name in knob_names + ] best_match_cfg = None best_match_score = 0 @@ -1133,4 +1131,4 @@ def __setitem__(self, name, entity): self._entity_map[name] = entity def __repr__(self): - return "%s,%s" % (str(self._entity_map)[12:-1], self.code_hash) + return f"{str(self._entity_map)[12:-1]},{self.code_hash}" diff --git a/python/tvm/autotvm/task/task.py b/python/tvm/autotvm/task/task.py index 18bc0720d5..c0fd5f2eca 100644 --- a/python/tvm/autotvm/task/task.py +++ b/python/tvm/autotvm/task/task.py @@ -60,7 +60,7 @@ def _encode(x): if isinstance(x, tensor.Tensor): return ("TENSOR", get_const_tuple(x.shape), x.dtype) if isinstance(x, (tuple, list, container.Array)): - return tuple([_encode(a) for a in x]) + return tuple(_encode(a) for a in x) if isinstance(x, (str, int, float, expr.Var, expr.Any)): return x if isinstance(x, (expr.StringImm, expr.IntImm, expr.FloatImm)): @@ -203,12 +203,7 @@ def __setstate__(self, state): ) def __repr__(self): - return "Task(func_name=%s, args=%s, kwargs=%s, workload=%s)" % ( - self.name, - self.args, - self.kwargs, - self.workload, - ) + return f"Task(func_name={self.name}, args={self.args}, kwargs={self.kwargs}, workload={self.workload})" TASK_TABLE = {} @@ -304,13 +299,11 @@ def _do_reg(f): TASK_TABLE[name] = TaskTemplate() tmpl = TASK_TABLE[name] if tmpl.fcompute is not None: - raise ValueError("Compute is already registered in autoTVM task %s" % name) + raise ValueError(f"Compute is already registered in autoTVM task {name}") tmpl.fcompute = f return f - if func: - return _do_reg(func) - return _do_reg + return _do_reg(func) if func else _do_reg def _register_task_schedule(name, func=None): @@ -336,13 +329,11 @@ def _do_reg(f): TASK_TABLE[name] = TaskTemplate() tmpl = TASK_TABLE[name] if tmpl.fschedule is not None: - raise ValueError("Schedule is already registered in autoTVM task %s" % name) + raise ValueError(f"Schedule is already registered in autoTVM task {name}") tmpl.fschedule = f return f - if func: - return _do_reg(func) - return _do_reg + return _do_reg(func) if func else _do_reg def _register_customized_task(name, func=None): @@ -368,13 +359,14 @@ def _do_reg(f): TASK_TABLE[name] = TaskTemplate() tmpl = TASK_TABLE[name] if tmpl.fcustomized is not None: - raise ValueError("Customized func is already registered in autoTVM task %s" % name) + raise ValueError( + f"Customized func is already registered in autoTVM task {name}" + ) + tmpl.fcustomized = f return f - if func: - return _do_reg(func) - return _do_reg + return _do_reg(func) if func else _do_reg def template(task_name, func=None): @@ -442,9 +434,7 @@ def wrapper(*args, **kwargs): _register_customized_task(task_name, f) return wrapper - if func: - return _decorate(func) - return _decorate + return _decorate(func) if func else _decorate def create(task_name, args, target, target_host=None): @@ -580,7 +570,7 @@ def _count_flop(exp): return 0 if isinstance(exp, expr.Call): - return sum([_count_flop(x) for x in exp.args]) + return sum(_count_flop(x) for x in exp.args) raise FlopCalculationError("Found 
unsupported operator in the compute expr") diff --git a/python/tvm/autotvm/task/topi_integration.py b/python/tvm/autotvm/task/topi_integration.py index 307d44810c..a6a8201371 100644 --- a/python/tvm/autotvm/task/topi_integration.py +++ b/python/tvm/autotvm/task/topi_integration.py @@ -183,7 +183,7 @@ def wrapper(*args, **kwargs): op.body, ) else: - raise RuntimeError("Unsupported op type: " + str(type(op))) + raise RuntimeError(f"Unsupported op type: {str(type(op))}") if isinstance(node, tensor.Tensor): return op.output(0) @@ -191,9 +191,7 @@ def wrapper(*args, **kwargs): return wrapper - if func: - return _decorate(func) - return _decorate + return _decorate(func) if func else _decorate def register_topi_schedule(task_name, func=None): @@ -243,9 +241,7 @@ def wrapper(outs, *args, **kwargs): return wrapper - if func: - return _decorate(func) - return _decorate + return _decorate(func) if func else _decorate def get_workload(outs, task_name=None): diff --git a/python/tvm/autotvm/tophub.py b/python/tvm/autotvm/tophub.py index 0a51bb12b2..a1987f492e 100644 --- a/python/tvm/autotvm/tophub.py +++ b/python/tvm/autotvm/tophub.py @@ -117,7 +117,7 @@ def context(target, extra_files=None): if not check_backend(tophub_location, name): continue - filename = "%s_%s.log" % (name, PACKAGE_VERSION[name]) + filename = f"{name}_{PACKAGE_VERSION[name]}.log" best_context.load(Path(AUTOTVM_TOPHUB_ROOT_PATH, filename)) break # only load one file to avoid some fallback template mismatch problem @@ -146,7 +146,7 @@ def check_backend(tophub_location, backend): assert backend in PACKAGE_VERSION, 'Cannot find backend "%s" in TopHub' % backend version = PACKAGE_VERSION[backend] - package_name = "%s_%s.log" % (backend, version) + package_name = f"{backend}_{version}.log" if Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name).is_file(): return True @@ -204,7 +204,7 @@ def load_reference_log(backend, model, workload_name): if backend not in PACKAGE_VERSION: return [] version = PACKAGE_VERSION[backend] - package_name = "%s_%s.log" % (backend, version) + package_name = f"{backend}_{version}.log" filename = Path(AUTOTVM_TOPHUB_ROOT_PATH, package_name) global REFERENCE_LOG_CACHE @@ -231,9 +231,13 @@ def load_reference_log(backend, model, workload_name): if not find and counts: model = max(counts.items(), key=lambda k: k[1])[0] - for inp, res in load_from_file(filename): - if model == inp.target.model and inp.task.workload[0] == workload_name: - tmp.append((inp, res)) + tmp.extend( + (inp, res) + for inp, res in load_from_file(filename) + if model == inp.target.model + and inp.task.workload[0] == workload_name + ) + REFERENCE_LOG_CACHE[key] = tmp return REFERENCE_LOG_CACHE[key] diff --git a/python/tvm/autotvm/tuner/ga_tuner.py b/python/tvm/autotvm/tuner/ga_tuner.py index 2ecd120e85..3721c076ff 100644 --- a/python/tvm/autotvm/tuner/ga_tuner.py +++ b/python/tvm/autotvm/tuner/ga_tuner.py @@ -125,17 +125,16 @@ def update(self, inputs, results): if np.random.random() < self.mutation_prob: tmp_gene[j] = np.random.randint(dim) - if len(self.visited) < len(self.space): - while knob2point(tmp_gene, self.dims) in self.visited: - j = np.random.randint(len(self.dims)) - tmp_gene[j] = np.random.randint( - self.dims[j] # pylint: disable=invalid-sequence-index - ) - next_genes.append(tmp_gene) - self.visited.add(knob2point(tmp_gene, self.dims)) - else: + if len(self.visited) >= len(self.space): break + while knob2point(tmp_gene, self.dims) in self.visited: + j = np.random.randint(len(self.dims)) + tmp_gene[j] = np.random.randint( + self.dims[j] # 
pylint: disable=invalid-sequence-index + ) + next_genes.append(tmp_gene) + self.visited.add(knob2point(tmp_gene, self.dims)) self.genes = next_genes self.trial_pt = 0 self.scores = [] diff --git a/python/tvm/autotvm/tuner/model_based_tuner.py b/python/tvm/autotvm/tuner/model_based_tuner.py index f07e7fb4eb..d724ddf065 100644 --- a/python/tvm/autotvm/tuner/model_based_tuner.py +++ b/python/tvm/autotvm/tuner/model_based_tuner.py @@ -235,10 +235,7 @@ def next_batch(self, batch_size): ret = [] counter = 0 - while counter < batch_size: - if len(self.visited) >= len(self.space): - break - + while counter < batch_size and len(self.visited) < len(self.space): while self.trial_pt < len(self.trials): index = self.trials[self.trial_pt] if index not in self.visited: @@ -261,13 +258,12 @@ def next_batch(self, batch_size): def update(self, inputs, results): for inp, res in zip(inputs, results): index = inp.config.index + self.xs.append(index) if res.error_no == 0: - self.xs.append(index) flops = inp.task.flop / np.mean(res.costs) self.flops_max = max(self.flops_max, flops) self.ys.append(flops) else: - self.xs.append(index) self.ys.append(0.0) # Usually the update function is called during the tune loop # after the index is already added to the visited set. @@ -333,10 +329,7 @@ def point2knob(p, dims): def knob2point(knob, dims): """convert knob form (vector) to point form (single integer)""" - p = 0 - for j, k in enumerate(knob): - p += int(np.prod(dims[:j])) * k - return p + return sum(int(np.prod(dims[:j])) * k for j, k in enumerate(knob)) def submodular_pick(scores, knobs, n_pick, knob_weight=1.0): diff --git a/python/tvm/autotvm/tuner/tuner.py b/python/tvm/autotvm/tuner/tuner.py index 848265ce17..8ab72c681d 100644 --- a/python/tvm/autotvm/tuner/tuner.py +++ b/python/tvm/autotvm/tuner/tuner.py @@ -124,10 +124,7 @@ def tune(self, n_trial, measure_option, early_stopping=None, callbacks=(), si_pr GLOBAL_SCOPE.in_tuning = True i = error_ct = 0 errors = [] - while i < n_trial: - if not self.has_next(): - break - + while i < n_trial and self.has_next(): configs = self.next_batch(min(n_parallel, n_trial - i)) inputs = [MeasureInput(self.task.target, self.task, config) for config in configs] diff --git a/python/tvm/autotvm/tuner/xgboost_cost_model.py b/python/tvm/autotvm/tuner/xgboost_cost_model.py index d4942ce6a4..5b0b364e33 100644 --- a/python/tvm/autotvm/tuner/xgboost_cost_model.py +++ b/python/tvm/autotvm/tuner/xgboost_cost_model.py @@ -117,7 +117,7 @@ def __init__( "objective": "rank:pairwise", } else: - raise RuntimeError("Invalid loss type: " + loss_type) + raise RuntimeError(f"Invalid loss type: {loss_type}") self.xgb_params["verbosity"] = 0 if num_threads: @@ -131,7 +131,7 @@ def __init__( elif feature_type == "curve": self.feature_extract_func = _extract_curve_feature_index else: - raise RuntimeError("Invalid feature type " + feature_type) + raise RuntimeError(f"Invalid feature type {feature_type}") if upper_model: # share a same feature cache with upper model self.feature_cache = upper_model.feature_cache @@ -165,9 +165,7 @@ def _close_pool(self): self.pool = None def _get_pool(self): - if self.upper_model: - return self.upper_model._get_pool() - return self.pool + return self.upper_model._get_pool() if self.upper_model else self.pool def _base_model_discount(self): return 1.0 / (2 ** (self._sample_size / 64.0)) @@ -224,11 +222,7 @@ def fit_log(self, records, plan_size, min_seed_records=500): tic = time.time() # filter data, only pick the data with a same task - data = [] - for inp, res in 
records: - if inp.task.name == self.task.name: - data.append((inp, res)) - + data = [(inp, res) for inp, res in records if inp.task.name == self.task.name] logger.debug("XGB load %d entries from history log file", len(data)) # extract feature @@ -241,7 +235,7 @@ def fit_log(self, records, plan_size, min_seed_records=500): elif self.fea_type == "curve": feature_extract_func = _extract_curve_feature_log else: - raise RuntimeError("Invalid feature type: " + self.fea_type) + raise RuntimeError(f"Invalid feature type: {self.fea_type}") result = pool.map_with_error_catching(feature_extract_func, data) result = list(result) # store results so we can iterate through them twice @@ -330,9 +324,7 @@ def _get_feature(self, indexes): fea_cache = self.feature_cache.get(self.fea_type) indexes = np.array(indexes) - need_extract = [x for x in indexes if x not in fea_cache] - - if need_extract: + if need_extract := [x for x in indexes if x not in fea_cache]: pool = self._get_pool() feas = pool.map_with_error_catching(self.feature_extract_func, need_extract) for i, fea in zip(need_extract, feas): @@ -388,10 +380,7 @@ def _extract_itervar_feature_log(arg): fea = feature.get_itervar_feature_flatten(sch, args, take_log=True) x = np.concatenate((fea, list(config.get_other_option().values()))) - if res.error_no == 0: - y = inp.task.flop / np.mean(res.costs) - else: - y = 0.0 + y = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0.0 return x, y @@ -437,10 +426,7 @@ def _extract_curve_feature_log(arg): fea = feature.get_buffer_curve_sample_flatten(sch, args, sample_n=20) x = np.concatenate((fea, list(config.get_other_option().values()))) - if res.error_no == 0: - y = inp.task.flop / np.mean(res.costs) - else: - y = 0.0 + y = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0.0 return x, y @@ -466,11 +452,7 @@ def init(env): state["maximize_score"] = maximize state["best_iteration"] = 0 - if maximize: - state["best_score"] = float("-inf") - else: - state["best_score"] = float("inf") - + state["best_score"] = float("-inf") if maximize else float("inf") if bst is not None: if bst.attr("best_score") is not None: state["best_score"] = float(bst.attr("best_score")) @@ -508,7 +490,7 @@ def callback(env): eval_res = [] keys = list(res_dict.keys()) - keys.sort(key=lambda x: x if metric_shortname not in x else "a" + x) + keys.sort(key=lambda x: x if metric_shortname not in x else f"a{x}") for key in keys: v = res_dict[key] eval_res.append([key] + v) diff --git a/python/tvm/contrib/cblas.py b/python/tvm/contrib/cblas.py index 1dfeb801b3..1399b2f30c 100644 --- a/python/tvm/contrib/cblas.py +++ b/python/tvm/contrib/cblas.py @@ -79,9 +79,9 @@ def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs (b, n, m), [lhs, rhs], lambda ins, outs: tvm.tir.call_packed( - "tvm.contrib.cblas.batch_matmul" - if not iterative - else "tvm.contrib.cblas.batch_matmul_iterative", + "tvm.contrib.cblas.batch_matmul_iterative" + if iterative + else "tvm.contrib.cblas.batch_matmul", ins[0], ins[1], outs[0], @@ -89,5 +89,5 @@ def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs transb, ), name="C", - **kwargs, + **kwargs ) diff --git a/python/tvm/contrib/cc.py b/python/tvm/contrib/cc.py index 867cbd6012..76fbdc11b0 100644 --- a/python/tvm/contrib/cc.py +++ b/python/tvm/contrib/cc.py @@ -43,8 +43,7 @@ def get_cc(): if not _is_linux_like(): return None - env_cxx = os.environ.get("CXX") or os.environ.get("CC") - if env_cxx: + if env_cxx := os.environ.get("CXX") or 
os.environ.get("CC"): return env_cxx cc_names = ["g++", "gcc", "clang++", "clang", "c++", "cc"] dirs_in_path = os.get_exec_path() @@ -126,16 +125,17 @@ def get_target_by_dump_machine(compiler): def get_target_triple(): """Get target triple according to dumpmachine option of compiler.""" - if compiler: - cmd = [compiler, "-dumpmachine"] - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - (out, _) = proc.communicate() - if proc.returncode != 0: - msg = "dumpmachine error:\n" - msg += py_str(out) - return None - return py_str(out) - return None + if not compiler: + return None + + cmd = [compiler, "-dumpmachine"] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + (out, _) = proc.communicate() + if proc.returncode != 0: + msg = "dumpmachine error:\n" + msg += py_str(out) + return None + return py_str(out) return get_target_triple @@ -209,7 +209,7 @@ def _fcompile(outputs, objects, options=None): if not output_format and hasattr(compile_func, "output_format"): output_format = compile_func.output_format - output_format = output_format if output_format else "so" + output_format = output_format or "so" if not get_target_triple and hasattr(compile_func, "get_target_triple"): get_target_triple = compile_func.get_target_triple @@ -221,21 +221,17 @@ def _fcompile(outputs, objects, options=None): def _linux_compile(output, objects, options, compile_cmd, compile_shared=False): cmd = [compile_cmd] - if compile_cmd != "nvcc": - if compile_shared or output.endswith(".so") or output.endswith(".dylib"): - cmd += ["-shared", "-fPIC"] - if sys.platform == "darwin": - cmd += ["-undefined", "dynamic_lookup"] - elif output.endswith(".obj"): - cmd += ["-c"] - else: + if compile_cmd == "nvcc": if compile_shared or output.endswith(".so") or output.endswith(".dylib"): cmd += ["--shared"] + elif compile_shared or output.endswith(".so") or output.endswith(".dylib"): + cmd += ["-shared", "-fPIC"] + if sys.platform == "darwin": + cmd += ["-undefined", "dynamic_lookup"] + elif output.endswith(".obj"): + cmd += ["-c"] cmd += ["-o", output] - if isinstance(objects, str): - cmd += [objects] - else: - cmd += objects + cmd += [objects] if isinstance(objects, str) else objects if options: cmd += options proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) diff --git a/python/tvm/contrib/clang.py b/python/tvm/contrib/clang.py index 9894447304..c8e2885ff6 100644 --- a/python/tvm/contrib/clang.py +++ b/python/tvm/contrib/clang.py @@ -52,7 +52,7 @@ def find_clang(required=True): valid_list = [utils.which(x) for x in cc_list] valid_list = [x for x in valid_list if x] if not valid_list and required: - raise RuntimeError("cannot find clang, candidates are: " + str(cc_list)) + raise RuntimeError(f"cannot find clang, candidates are: {cc_list}") return valid_list @@ -80,11 +80,11 @@ def create_llvm(inputs, output=None, options=None, cc=None): code : str The generated llvm text IR. """ - cc = cc if cc else find_clang()[0] + cc = cc or find_clang()[0] cmd = [cc] cmd += ["-S", "-emit-llvm"] temp = utils.tempdir() - output = output if output else temp.relpath("output.ll") + output = output or temp.relpath("output.ll") inputs = [inputs] if isinstance(inputs, str) else inputs input_files = [] for i, code in enumerate(inputs): diff --git a/python/tvm/contrib/cudnn.py b/python/tvm/contrib/cudnn.py index d3128a63dd..6d1f5447cf 100644 --- a/python/tvm/contrib/cudnn.py +++ b/python/tvm/contrib/cudnn.py @@ -49,10 +49,7 @@ def exists(): exists. Otherwise, False. 
""" func = tvm.get_global_func("tvm.contrib.cudnn.exists", allow_missing=True) - if func is None: - return False - - return bool(func()) + return False if func is None else bool(func()) def algo_to_index(algo_type, algo_name): @@ -202,7 +199,7 @@ def conv_output_shape( """ assert len(x_shape) == len(w_shape) - assert len(x_shape) in (4, 5) + assert len(x_shape) in {4, 5} if tensor_format == 0: n_output = x_shape[0] @@ -232,14 +229,13 @@ def conv_output_shape( w_shape = w_shape[2:] else: - raise ValueError("Unknown CuDNN tensor format: '{}'".format(tensor_format)) + raise ValueError(f"Unknown CuDNN tensor format: '{tensor_format}'") x_lanes = tvm.runtime.DataType(data_dtype).lanes - assert x_chan * x_lanes == w_chan_input * groups, ( - "Mismatched dimensions, data has {} channels/group " - "(dimension {} with {} lanes/value, {} groups), " - "but weights require {} input channels/group" - ).format(x_chan // groups, x_chan, x_lanes, groups, w_chan_input) + assert ( + x_chan * x_lanes == w_chan_input * groups + ), f"Mismatched dimensions, data has {x_chan // groups} channels/group (dimension {x_chan} with {x_lanes} lanes/value, {groups} groups), but weights require {w_chan_input} input channels/group" + output_dims = [] for x_shape_i, w_shape_i, pad_i, stride_i, dilation_i in zip( @@ -253,7 +249,7 @@ def conv_output_shape( elif tensor_format == 1: output = [n_output, *output_dims, c_output] else: - raise ValueError("Unknown CuDNN tensor format: '{}'".format(tensor_format)) + raise ValueError(f"Unknown CuDNN tensor format: '{tensor_format}'") return output @@ -305,7 +301,7 @@ def conv_dgrad_shape( dy_shape = dy_shape[1:-1] w_shape = w_shape[1:-1] else: - raise ValueError("Unsupported CuDNN tensor format: '{}'".format(tensor_format)) + raise ValueError(f"Unsupported CuDNN tensor format: '{tensor_format}'") input_dims = [] for dy_shape_i, w_shape_i, pad_i, stride_i, dilation_i, out_pad in zip( @@ -316,12 +312,7 @@ def conv_dgrad_shape( ) input_dims.append(input_dim) - if tensor_format == 0: - output = [N, C, *input_dims] - else: - output = [N, *input_dims, C] - - return output + return [N, C, *input_dims] if tensor_format == 0 else [N, *input_dims, C] def _conv_find_algo( @@ -342,7 +333,7 @@ def _conv_find_algo( and the convolution type. 
""" dims = len(x_shape) - assert dims in (4, 5) + assert dims in {4, 5} pad, stride, dilation, xshape, wshape = _prepare_global_func_params( dims - 2, pad, stride, dilation, x_shape, w_shape @@ -577,7 +568,7 @@ def conv_forward(x, w, pad, stride, dilation, conv_mode, tensor_format, algo, co The result tensor """ dims = len(x.shape) - assert dims in (4, 5) + assert dims in {4, 5} conv_dtype = x.dtype if conv_dtype is None else conv_dtype pad, stride, dilation, _, _ = _prepare_global_func_params(dims - 2, pad, stride, dilation) diff --git a/python/tvm/contrib/cutlass/build.py b/python/tvm/contrib/cutlass/build.py index 0c8c2ad0b2..03a7127cf7 100644 --- a/python/tvm/contrib/cutlass/build.py +++ b/python/tvm/contrib/cutlass/build.py @@ -52,20 +52,22 @@ def _get_cutlass_compile_options(sm, threads, use_fast_math=False): cutlass_include = os.path.join(cutlass_root, "include") cutlass_util_include = os.path.join(cutlass_root, "tools/util/include") - kwargs = {} - kwargs["cc"] = "nvcc" - kwargs["options"] = [ - "-c", - "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1", - "-gencode=arch=compute_%d,code=[sm_%d,compute_%d]" % (sm, sm, sm), - "-Xcompiler=-fPIC", - "-Xcompiler=-Wconversion", - "-Xcompiler=-fno-strict-aliasing", - "-O3", - "-std=c++14", - "-I" + cutlass_include, - "-I" + cutlass_util_include, - ] + kwargs = { + "cc": "nvcc", + "options": [ + "-c", + "-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1", + "-gencode=arch=compute_%d,code=[sm_%d,compute_%d]" % (sm, sm, sm), + "-Xcompiler=-fPIC", + "-Xcompiler=-Wconversion", + "-Xcompiler=-fno-strict-aliasing", + "-O3", + "-std=c++14", + f"-I{cutlass_include}", + f"-I{cutlass_util_include}", + ], + } + if use_fast_math: kwargs["options"].append("-DCUTLASS_USE_TANH_FOR_SIGMOID") cuda_ver = get_cuda_version() @@ -93,7 +95,11 @@ def visit_call(self, call): self.signature["ret_dtype"] = op.ret_type.dtype self.visit(op.body) - if str(op) in ["nn.conv2d", "nn.conv2d_transpose", "nn.conv2d_backward_weight"]: + if str(op) in { + "nn.conv2d", + "nn.conv2d_transpose", + "nn.conv2d_backward_weight", + }: self.op_attrs = call.attrs for arg in call.args: @@ -293,7 +299,9 @@ def handle_conv2d( def num_cutlass_partitions(mod): - return sum([(1 if "cutlass" in var.name_hint else 0) for var in mod.get_global_vars()]) + return sum( + 1 if "cutlass" in var.name_hint else 0 for var in mod.get_global_vars() + ) def tune_cutlass_kernels( @@ -356,9 +364,9 @@ def tune_cutlass_kernels( num_cutlass_partition = 0 for var in mod.get_global_vars(): fun_name = var.name_hint - func = mod[fun_name] if "cutlass" in fun_name: num_cutlass_partition += 1 + func = mod[fun_name] new_func = tune_cutlass_function( func, use_3xtf32, @@ -432,7 +440,7 @@ def tune_cutlass_function( op_type = annotator.signature["op_type"] new_attrs = {"op_type": op_type} - new_attrs.update(annotator.signature) + new_attrs |= annotator.signature new_attrs.update(func.attrs) arg0_shape = new_attrs["arg0_shape"] arg1_shape = new_attrs["arg1_shape"] @@ -504,7 +512,7 @@ def tune_cutlass_function( ) ) else: - raise ValueError("%s unsupported composite" % op_type) + raise ValueError(f"{op_type} unsupported composite") new_attrs = tvm.ir.make_node("DictAttrs", **new_attrs) return relay.Function( diff --git a/python/tvm/contrib/cutlass/conv2d_operation.py b/python/tvm/contrib/cutlass/conv2d_operation.py index 162e8f6678..6cbc03a7eb 100644 --- a/python/tvm/contrib/cutlass/conv2d_operation.py +++ b/python/tvm/contrib/cutlass/conv2d_operation.py @@ -60,21 +60,15 @@ def core_name(self): if self.tile_description.math_instruction.opcode_class 
== OpcodeClass.TensorOp: inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) - if ( - self.tile_description.math_instruction.element_a != self.A.element - and self.tile_description.math_instruction.element_a != self.accumulator_type() - ): + if self.tile_description.math_instruction.element_a not in [ + self.A.element, + self.accumulator_type(), + ]: intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a] else: inst_shape = "" - return "%s%s%s%s_%s" % ( - ShortDataTypeNames[self.accumulator_type()], - inst_shape, - intermediate_type, - ConvKindNames[self.conv_kind], - IteratorAlgorithmNames[self.iterator_algorithm], - ) + return f"{ShortDataTypeNames[self.accumulator_type()]}{inst_shape}{intermediate_type}{ConvKindNames[self.conv_kind]}_{IteratorAlgorithmNames[self.iterator_algorithm]}" def extended_name(self): """Append data types if they differ from compute type.""" @@ -103,7 +97,7 @@ def extended_name(self): return extended_name def layout_name(self): - return "%s" % (ShortLayoutTypeNames[self.A.layout]) + return f"{ShortLayoutTypeNames[self.A.layout]}" def procedural_name(self): """ @@ -336,14 +330,13 @@ def emit( template = substitute_template( gemm_template, {"epilogue": self.epilogue_residual_block} ) - values.update( - { - "unary_op": residual_block_info["unary_op"], - "binary_op": residual_block_info["binary_op"], - "activation": residual_block_info["activation"], - "conv_kernel_postfix": "WithBroadcast", - } - ) + values |= { + "unary_op": residual_block_info["unary_op"], + "binary_op": residual_block_info["binary_op"], + "activation": residual_block_info["activation"], + "conv_kernel_postfix": "WithBroadcast", + } + elif no_beta_scaling: template = substitute_template( gemm_template, {"epilogue": self.epilogue_no_beta_scaling} diff --git a/python/tvm/contrib/cutlass/conv2d_profiler.py b/python/tvm/contrib/cutlass/conv2d_profiler.py index 1ed5550e0a..d216801086 100644 --- a/python/tvm/contrib/cutlass/conv2d_profiler.py +++ b/python/tvm/contrib/cutlass/conv2d_profiler.py @@ -205,11 +205,10 @@ def __init__(self): ) def emit(self, op_def, op_name, element_output, split_k_slices=1): - src = self.template.render( + return self.template.render( OperatorDef=op_def, OperatorName=op_name, ElementOutput=DataTypeTag[element_output], SplitK=split_k_slices, Reduction=self.reduction if split_k_slices > 1 else "", ) - return src diff --git a/python/tvm/contrib/cutlass/gemm_operation.py b/python/tvm/contrib/cutlass/gemm_operation.py index 1a5e945222..77177d251e 100644 --- a/python/tvm/contrib/cutlass/gemm_operation.py +++ b/python/tvm/contrib/cutlass/gemm_operation.py @@ -54,24 +54,18 @@ def core_name(self): inst_shape = "" intermediate_type = "" - if ( - self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp - or self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp - ): + if self.tile_description.math_instruction.opcode_class in [ + OpcodeClass.TensorOp, + OpcodeClass.WmmaTensorOp, + ]: inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape) - if ( - self.tile_description.math_instruction.element_a != self.A.element - and self.tile_description.math_instruction.element_a - != self.tile_description.math_instruction.element_accumulator - ): + if self.tile_description.math_instruction.element_a not in [ + self.A.element, + self.tile_description.math_instruction.element_accumulator, + ]: intermediate_type = 
DataTypeNames[self.tile_description.math_instruction.element_a] - return "%s%s%s%s" % ( - self.short_math_name(), - inst_shape, - intermediate_type, - "gemm", - ) + return f"{self.short_math_name()}{inst_shape}{intermediate_type}gemm" def extended_name(self): """Append data types if they differ from compute type.""" @@ -100,7 +94,7 @@ def extended_name(self): return extended_name def layout_name(self): - return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout]) + return f"{ShortLayoutTypeNames[self.A.layout]}{ShortLayoutTypeNames[self.B.layout]}" def procedural_name(self): """The full procedural name indicates architecture, extended name, tile size, diff --git a/python/tvm/contrib/cutlass/gemm_profiler.py b/python/tvm/contrib/cutlass/gemm_profiler.py index 13679cd05c..bfaa0f435f 100644 --- a/python/tvm/contrib/cutlass/gemm_profiler.py +++ b/python/tvm/contrib/cutlass/gemm_profiler.py @@ -185,7 +185,7 @@ def __init__(self): ) def emit(self, op_name, op_def, dtype_a, dtype_b, dtype_c, ld): - src = self.template.render( + return self.template.render( OperatorName=op_name, OperatorDef=op_def, DTypeA=dtype_a, @@ -193,4 +193,3 @@ def emit(self, op_name, op_def, dtype_a, dtype_b, dtype_c, ld): DTypeC=dtype_c, LeadingDim=ld, ) - return src diff --git a/python/tvm/contrib/cutlass/gen_conv2d.py b/python/tvm/contrib/cutlass/gen_conv2d.py index bb26a47a55..7721a7e9e7 100644 --- a/python/tvm/contrib/cutlass/gen_conv2d.py +++ b/python/tvm/contrib/cutlass/gen_conv2d.py @@ -269,14 +269,21 @@ def select_op( out_dtype, data_dtype, weight_dtype, - partial(enumerate_conv2d_operators, conv_kind, stride_support, split_k_slices), - lambda align: all([dim % align == 0 for dim in [IC, OC]]), + partial( + enumerate_conv2d_operators, + conv_kind, + stride_support, + split_k_slices, + ), + lambda align: all(dim % align == 0 for dim in [IC, OC]), use_3xtf32, profile_all_alignments, - # Use fp32 accumulation for wgrad to align with cuDNN - accumlator_dtype="float32" if conv_kind == ConvKind.Wgrad else out_dtype, + accumlator_dtype="float32" + if conv_kind == ConvKind.Wgrad + else out_dtype, ) + if not find_first_valid: self.engine.compile_all(ops, use_multiprocessing) diff --git a/python/tvm/contrib/cutlass/gen_gemm.py b/python/tvm/contrib/cutlass/gen_gemm.py index f55f4f7622..07c2e519f1 100644 --- a/python/tvm/contrib/cutlass/gen_gemm.py +++ b/python/tvm/contrib/cutlass/gen_gemm.py @@ -181,9 +181,10 @@ def get_default( if arg0_dtype == "float32": default_kernel_name = ( - default_kernel_name[0] if not use_3xtf32 else default_kernel_name[1] + default_kernel_name[1] if use_3xtf32 else default_kernel_name[0] ) + filtered = list(filter(lambda op: op["name"] == default_kernel_name, ops)) assert len(filtered) == 1 op = filtered[0] @@ -228,13 +229,13 @@ def select_op( arg0_dtype, arg1_dtype, enumerate_gemm_operators, - lambda align: all([dim % align == 0 for dim in [M, N, K]]), + lambda align: all(dim % align == 0 for dim in [M, N, K]), use_3xtf32, profile_all_alignments=profile_all_alignments, - # TODO(masahi): Invesitigate when fp32 accumulation is needed for gemm accumlator_dtype=out_dtype, ) + if not find_first_valid: self.engine.compile_all(ops, use_multiprocessing) diff --git a/python/tvm/contrib/cutlass/gen_tensor_op.py b/python/tvm/contrib/cutlass/gen_tensor_op.py index 3c7e1aba2a..83ee00cfe9 100644 --- a/python/tvm/contrib/cutlass/gen_tensor_op.py +++ b/python/tvm/contrib/cutlass/gen_tensor_op.py @@ -126,7 +126,7 @@ def generate_sm75_tensor_op_1688( ] alignment_constraints = [align 
for align in alignment_constraints if check_align(align)] - assert len(alignment_constraints) > 0 + assert alignment_constraints if not profile_all_alignments: alignment_constraints = [alignment_constraints[0]] @@ -249,7 +249,7 @@ def get_tile_descriptions(math_inst): alignment_constraints = [align for align in alignment_constraints if check_align(align)] - if len(alignment_constraints) > 0 and not profile_all_alignments: + if alignment_constraints and not profile_all_alignments: alignment_constraints = [alignment_constraints[0]] if arg0_dtype != "float32" and arg1_dtype != "float32": @@ -267,7 +267,7 @@ def get_tile_descriptions(math_inst): # TF32 (float32 + float32 case) is only supported on sm80 sm75_kernels = [] - if len(alignment_constraints) > 0: + if alignment_constraints: sm80_kernels = generate_tensor_op_common( math_instructions, alignment_constraints, get_tile_descriptions, op_creator ) @@ -359,8 +359,7 @@ def evaluate(self, op, args): # Bail out if compilation fails for a whatever reason (e.g. static assert failure) return float("inf") cmd = [opath] - for arg in args: - cmd.append(str(arg)) + cmd.extend(str(arg) for arg in args) try: logger.info("invoking evaluation %s", cmd) sp = subprocess.run(cmd, capture_output=True, check=True) diff --git a/python/tvm/contrib/debugger/debug_executor.py b/python/tvm/contrib/debugger/debug_executor.py index 5b03ebb277..922d04c1cf 100644 --- a/python/tvm/contrib/debugger/debug_executor.py +++ b/python/tvm/contrib/debugger/debug_executor.py @@ -152,7 +152,7 @@ def _get_dump_path(self, device): Directory path where the graph and node outputs will be stored. """ # save to file - folder_name = _DUMP_PATH_PREFIX + "device_" + folder_name = f"{_DUMP_PATH_PREFIX}device_" folder_name = folder_name + device.replace(":", "_") path = os.path.join(self._dump_root, folder_name) self._ensure_dir(path) @@ -204,8 +204,7 @@ def _execute_next_node(self, node_index, output_index): output_tensors : Array Array of output tensors """ - output_tensors = self._execute_next_node_get_output(node_index, output_index) - return output_tensors + return self._execute_next_node_get_output(node_index, output_index) def _run_per_layer(self): """Execute up to each node and each debug output will be @@ -271,7 +270,7 @@ def debug_get_output(self, node, out=None): elif isinstance(node, int): node_index = node else: - raise RuntimeError(f"Require node index or name only.") + raise RuntimeError("Require node index or name only.") self._debug_get_output(node_index, out) @@ -406,7 +405,7 @@ def run_individual( (nodes_count,) = struct.unpack_from(format_size, res, offset) offset += struct.calcsize(format_size) format_data = "@" + repeat * "d" - for _ in range(0, nodes_count): + for _ in range(nodes_count): ret = struct.unpack_from(format_data, res, offset) offset += struct.calcsize(format_data) results.append([*ret]) diff --git a/python/tvm/contrib/debugger/debug_result.py b/python/tvm/contrib/debugger/debug_result.py index 006edd3458..dcc24b3651 100644 --- a/python/tvm/contrib/debugger/debug_result.py +++ b/python/tvm/contrib/debugger/debug_result.py @@ -76,11 +76,13 @@ def _update_graph_json(self): nodes_len = len(self._nodes_list) for i in range(nodes_len): node = self._nodes_list[i] - input_list = [] - for input_node in node["inputs"]: - input_list.append(self._nodes_list[input_node[0]]["name"]) + input_list = [ + self._nodes_list[input_node[0]]["name"] + for input_node in node["inputs"] + ] + node["inputs"] = input_list - dtype = str("type: " + self._dtype_list[1][i]) + dtype = 
str(f"type: {self._dtype_list[1][i]}") if "attrs" not in node: node["attrs"] = {} node["op"] = "param" @@ -225,13 +227,13 @@ def get_debug_result(self, sort_by_time=True): ] eid = 0 data = [] - total_time = sum([np.mean(time) for time in self._time_list]) + total_time = sum(np.mean(time) for time in self._time_list) for node, time in zip(self._nodes_list, self._time_list): time_mean = np.mean(time) num_outputs = self.get_graph_node_output_num(node) - for j in range(num_outputs): + for _ in range(num_outputs): op = node["op"] - if node["op"] == "param": + if op == "param": eid += 1 continue name = node["name"] @@ -262,8 +264,7 @@ def get_debug_result(self, sort_by_time=True): fmt = fmt + "{:<" + str(max_len + 2) + "}" log = [fmt.format(*header)] log.append(fmt.format(*lines)) - for row in data: - log.append(fmt.format(*row)) + log.extend(fmt.format(*row) for row in data) return "\n".join(log) def display_debug_result(self, sort_by_time=True): diff --git a/python/tvm/contrib/download.py b/python/tvm/contrib/download.py index e0c13acc8d..22baf36e12 100644 --- a/python/tvm/contrib/download.py +++ b/python/tvm/contrib/download.py @@ -169,7 +169,7 @@ def download_testdata(url, relpath, module=None, overwrite=False): elif isinstance(module, (list, tuple)): module_path = Path(*module) else: - raise ValueError("Unsupported module: " + module) + raise ValueError(f"Unsupported module: {module}") abspath = Path(TEST_DATA_ROOT_PATH, module_path, relpath) download(url, abspath, overwrite=overwrite, size_compare=False) return str(abspath) diff --git a/python/tvm/contrib/ethosu/cascader/device_config.py b/python/tvm/contrib/ethosu/cascader/device_config.py index f654a2598b..a138089e75 100644 --- a/python/tvm/contrib/ethosu/cascader/device_config.py +++ b/python/tvm/contrib/ethosu/cascader/device_config.py @@ -173,22 +173,26 @@ def _get_output_cycles( bw_limit = 0 if op_type == "ethosu_pooling" and op_str == "MAX": cycles = self._output_cycles[0] - elif op_type in ("ethosu_pooling", "ethosu_conv2d", "ethosu_depthwise_conv2d"): + elif op_type in { + "ethosu_pooling", + "ethosu_conv2d", + "ethosu_depthwise_conv2d", + }: cycles = self._output_cycles[1] if ifm_dtype == "int8" else self._output_cycles[2] elif op_type == "ethosu_binary_elementwise": # Binary Bandwidth Limitations - if ifm_dtype == "int8": - bw_limit = 0.125 if ofm_dtype == "int8" else 0.75 - elif ifm_dtype == "int16": + if ifm_dtype == "int16": bw_limit = 0.75 if ofm_dtype == "int16" else 1 + elif ifm_dtype == "int8": + bw_limit = 0.125 if ofm_dtype == "int8" else 0.75 else: bw_limit = 1.5 - if op_str in ("MIN", "MAX"): + if op_str in {"MIN", "MAX"}: cycles = self._output_cycles[1] elif op_str == "MUL": cycles = self._output_cycles[2] - if op_str in ("ADD", "SUB"): + if op_str in {"ADD", "SUB"}: if ofm_dtype == "int32": cycles = ( self._output_cycles[2] if ifm_dtype == "int32" else self._output_cycles[3] @@ -205,9 +209,9 @@ def _get_output_cycles( if op_str == "CLZ": cycles = self._output_cycles[1] - elif op_str in ("SHL", "SHR"): + elif op_str in {"SHL", "SHR"}: cycles = self._output_cycles[2] - elif op_str in ("LRELU", "ABS"): + elif op_str in {"LRELU", "ABS"}: cycles = self._output_cycles[1] if ifm_dtype == "int16": bw_limit = 0.5 @@ -215,7 +219,7 @@ def _get_output_cycles( act_cycles = 0 if activation == "CLIP": act_cycles = self._activation_cycles[0] - elif activation in ("LUT", "TANH", "SIGMOID"): + elif activation in {"LUT", "TANH", "SIGMOID"}: act_cycles = self._activation_cycles[1] return max((cycles / self._output_units), act_cycles, 
bw_limit) @@ -239,12 +243,8 @@ def _get_delay_cycles(self, op_type: str, ifm_dtype: str) -> int: int The amount of delay cycles """ - if op_type in ("ethosu_conv2d", "ethosu_depthwise2d", "ethosu_pooling"): - if ifm_dtype == "int16": - return self._delay_cycles[1] - - return self._delay_cycles[0] - + if op_type in {"ethosu_conv2d", "ethosu_depthwise2d", "ethosu_pooling"}: + return self._delay_cycles[1] if ifm_dtype == "int16" else self._delay_cycles[0] return 0 def _get_weight_decoder_cycles(self, op_type: str) -> int: @@ -261,7 +261,7 @@ def _get_weight_decoder_cycles(self, op_type: str) -> int: int Estimated cycles for weight decoding """ - if op_type in ("ethosu_conv2d", "ethosu_depthwise2d"): + if op_type in {"ethosu_conv2d", "ethosu_depthwise2d"}: return 32 * self._micro_block.depth // 8 return 0 @@ -328,13 +328,15 @@ def _get_input_block( ) if op_type == "ethosu_conv2d": - if dtype == "int8": - if partkernel: - depth = self._align(min(32, input_shape.depth), 8) - else: - depth = self._align(min(16, input_shape.depth), 8) - elif dtype == "int16": + if dtype == "int16": depth = self._align(min(16, input_shape.depth), 4) + elif dtype == "int8": + depth = ( + self._align(min(32, input_shape.depth), 8) + if partkernel + else self._align(min(16, input_shape.depth), 8) + ) + else: depth = self._align(min(8, input_shape.depth), 2) else: @@ -413,16 +415,11 @@ def _get_subkernels(self, dilated_kernel_h: int, dilated_kernel_w: int): subkernels = [] for y in subkernels_y: - for x in subkernels_x: - subkernels.append((y, x)) - + subkernels.extend((y, x) for x in subkernels_x) return subkernels def _get_accumulator_width(self, op_type: str, ifm_dtype: str): - if ifm_dtype == "int16" and op_type != "ethosu_pooling": - return 5 - - return 4 + return 5 if ifm_dtype == "int16" and op_type != "ethosu_pooling" else 4 def is_partkernel( self, op_type: str, ifm_channels: int, ifm_dtype: str, kernel_elements: int @@ -566,14 +563,13 @@ def get_elementwise_block_config( # Split the block in half until it fits into SHRAM max_height, max_width, max_depth = self._max_block_shape.as_list()[1:] if output_layout == "NHCWB16": - output_height = output_shape[1] output_width = output_shape[3] output_channels = output_shape[2] * 16 else: - output_height = output_shape[1] output_width = output_shape[2] output_channels = output_shape[3] + output_height = output_shape[1] output_nhwc_block = [ 1, _round_up(min(output_height, max_height), self._micro_block.height), @@ -581,7 +577,7 @@ def get_elementwise_block_config( _round_up(min(output_channels, max_depth), self._micro_block.depth), ] output_block = self._create_layout_block(output_nhwc_block, output_layout) - split_order = (a for a in [1, 2, 3]) + split_order = iter([1, 2, 3]) split_axis = next(split_order) offset = [0] * len(output_block) @@ -643,19 +639,21 @@ def _get_subkernel_propagator( transform = ifm_propagator.transform if op_type != "ethosu_identity": + transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h) if input_layout == "NHCWB16": - transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h) transform[3][-1] = min(transform[3][-1], self._subkernel_limits[1] - stride_w) else: - transform[1][-1] = min(transform[1][-1], self._subkernel_limits[0] - stride_h) transform[2][-1] = min(transform[2][-1], self._subkernel_limits[1] - stride_w) - if op_type in ("ethosu_pooling", "ethosu_depthwise_conv2d"): - if output_layout == "NHCWB16" and input_layout == "NHWC": - transform[3][-1] = depth - elif output_layout == "NHCWB16" 
and input_layout == "NHCWB16": + if ( + op_type in ("ethosu_pooling", "ethosu_depthwise_conv2d") + and output_layout == "NHCWB16" + ): + if input_layout == "NHCWB16": transform[2][-1] = 1 + ((depth - 1) // 16) + elif input_layout == "NHWC": + transform[3][-1] = depth return Propagator(transform, ifm_propagator.offset) def get_valid_block_configs( @@ -831,15 +829,10 @@ def get_valid_block_configs( # Block culling disabled - add all block configs that fit valid_block_configs.append(block_config) else: - # Add block config only if it's not dominated by an existing block. - # A block config is dominated by another if its output_shape is greater - # or equal in every dimension and strictly greater in at least one - # dimension. - dominated = False - for valid_block in valid_block_configs: - if block_config < valid_block: - dominated = True - break + dominated = any( + block_config < valid_block + for valid_block in valid_block_configs + ) if not dominated: valid_block_configs.append(block_config) diff --git a/python/tvm/contrib/graph_executor.py b/python/tvm/contrib/graph_executor.py index 08dae307a8..61eea769b3 100644 --- a/python/tvm/contrib/graph_executor.py +++ b/python/tvm/contrib/graph_executor.py @@ -103,9 +103,7 @@ def get_device(libmod, device): assert _rpc_ffi_api.SessTableIndex(libmod) == cur_dev._rpc_sess._tbl_index num_rpc_dev += 1 device_type = cur_dev.device_type % rpc_base.RPC_SESS_MASK - device_type_id.append(device_type) - device_type_id.append(cur_dev.device_id) - + device_type_id.extend((device_type, cur_dev.device_id)) if 0 < num_rpc_dev < len(device): raise ValueError("Either all or none of the devices should be rpc.") return device, num_rpc_dev, device_type_id @@ -188,11 +186,7 @@ def set_input(self, key=None, value=None, **params): keys = list(params.keys()) keys.sort(key=lambda x: -np.prod(params[x].shape)) for k in keys: - # TODO(zhiics) Skip the weights for submodule in a better way. 
- # We should use ConstLoaderModule for initialization and remove - # params from set_input - val = self._get_input(k) - if val: + if val := self._get_input(k): self._get_input(k).copyfrom(params[k]) def run(self, **input_dict): @@ -434,8 +428,7 @@ def benchmark( # Have to unpack kwargs into a single list args = [] for k, v in kwargs.items(): - args.append(k) - args.append(v) + args.extend((k, v)) return self.module.time_evaluator( "run_from_inputs", device, diff --git a/python/tvm/contrib/mkl.py b/python/tvm/contrib/mkl.py index 449d660c90..c76a2a469a 100644 --- a/python/tvm/contrib/mkl.py +++ b/python/tvm/contrib/mkl.py @@ -112,9 +112,9 @@ def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs (b, n, m), [lhs, rhs], lambda ins, outs: tvm.tir.call_packed( - "tvm.contrib.mkl.batch_matmul" - if not iterative - else "tvm.contrib.mkl.batch_matmul_iterative", + "tvm.contrib.mkl.batch_matmul_iterative" + if iterative + else "tvm.contrib.mkl.batch_matmul", ins[0], ins[1], outs[0], @@ -122,5 +122,5 @@ def batch_matmul(lhs, rhs, transa=False, transb=False, iterative=False, **kwargs transb, ), name="C", - **kwargs, + **kwargs ) diff --git a/python/tvm/contrib/mxnet.py b/python/tvm/contrib/mxnet.py index 6e551dfe46..d42107eb29 100644 --- a/python/tvm/contrib/mxnet.py +++ b/python/tvm/contrib/mxnet.py @@ -74,5 +74,5 @@ def _get_bridge_func(): _wrap_async = _get_bridge_func() tvm._ffi.registry.register_extension(mxnet.nd.NDArray) - const_loc = const_loc if const_loc else [] + const_loc = const_loc or [] return _wrap_async(func, tvm.runtime._ffi_api.TVMSetStream, len(const_loc), *const_loc) diff --git a/python/tvm/contrib/ndk.py b/python/tvm/contrib/ndk.py index 275d40f103..cf23050288 100644 --- a/python/tvm/contrib/ndk.py +++ b/python/tvm/contrib/ndk.py @@ -46,12 +46,8 @@ def create_shared(output, objects, options=None): cmd = [compiler] cmd += ["-o", output] - if isinstance(objects, str): - cmd += [objects] - else: - cmd += objects - - options = options if options else ["-shared", "-fPIC", "-lm"] + cmd += [objects] if isinstance(objects, str) else objects + options = options or ["-shared", "-fPIC", "-lm"] cmd += options proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) diff --git a/python/tvm/contrib/nvcc.py b/python/tvm/contrib/nvcc.py index 5a104be996..172f018dff 100644 --- a/python/tvm/contrib/nvcc.py +++ b/python/tvm/contrib/nvcc.py @@ -70,14 +70,14 @@ def compile_cuda(code, target_format="ptx", arch=None, options=None, path_target if target_format not in ["cubin", "ptx", "fatbin"]: raise ValueError("target_format must be in cubin, ptx, fatbin") temp_code = temp.relpath("my_kernel.cu") - temp_target = temp.relpath("my_kernel.%s" % target_format) + temp_target = temp.relpath(f"my_kernel.{target_format}") with open(temp_code, "w") as out_file: out_file.write(code) - file_target = path_target if path_target else temp_target + file_target = path_target or temp_target cmd = ["nvcc"] - cmd += ["--%s" % target_format, "-O3"] + cmd += [f"--{target_format}", "-O3"] if isinstance(arch, list): cmd += arch elif isinstance(arch, str): @@ -113,10 +113,10 @@ def compile_cuda(code, target_format="ptx", arch=None, options=None, path_target raise RuntimeError(msg) with open(file_target, "rb") as f: - data = bytearray(f.read()) - if not data: + if data := bytearray(f.read()): + return data + else: raise RuntimeError("Compilation error: empty result is generated") - return data def find_cuda_path(): @@ -186,8 +186,7 @@ def get_cuda_version(cuda_path=None): 
@tvm._ffi.register_func def tvm_callback_cuda_compile(code): """use nvcc to generate fatbin code for better optimization""" - ptx = compile_cuda(code, target_format="fatbin") - return ptx + return compile_cuda(code, target_format="fatbin") @tvm._ffi.register_func("tvm_callback_libdevice_path") @@ -242,7 +241,7 @@ def find_libdevice_path(arch): selected_path = fn if selected_path is None: - raise RuntimeError("Cannot find libdevice for arch {}".format(arch)) + raise RuntimeError(f"Cannot find libdevice for arch {arch}") path = os.path.join(lib_path, selected_path) return path @@ -276,7 +275,7 @@ def get_target_compute_version(target=None): target = target or Target.current() if target and target.arch: major, minor = target.arch.split("_")[1] - return major + "." + minor + return f"{major}.{minor}" # 3. GPU compute version if tvm.cuda(0).exist: @@ -310,7 +309,7 @@ def parse_compute_version(compute_version): return major, minor except (IndexError, ValueError) as err: # pylint: disable=raise-missing-from - raise RuntimeError("Compute version parsing error: " + str(err)) + raise RuntimeError(f"Compute version parsing error: {str(err)}") def have_fp16(compute_version): @@ -324,12 +323,7 @@ def have_fp16(compute_version): major, minor = parse_compute_version(compute_version) # fp 16 support in reference to: # https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions - if major == 5 and minor == 3: - return True - if major >= 6: - return True - - return False + return True if major == 5 and minor == 3 else major >= 6 def have_int8(compute_version): @@ -341,10 +335,7 @@ def have_int8(compute_version): compute capability of a GPU (e.g. "6.1") """ major, _ = parse_compute_version(compute_version) - if major >= 6: - return True - - return False + return major >= 6 def have_tensorcore(compute_version=None, target=None): @@ -372,21 +363,16 @@ def have_tensorcore(compute_version=None, target=None): compute_version = target.attrs["arch"] # Compute version will be in the form "sm_{major}{minor}" major, minor = compute_version.split("_")[1] - compute_version = major + "." + minor + compute_version = f"{major}.{minor}" major, _ = parse_compute_version(compute_version) - if major >= 7: - return True - - return False + return major >= 7 def have_cudagraph(): """Either CUDA Graph support is provided""" try: cuda_ver = get_cuda_version() - if cuda_ver < (10, 0): - return False - return True + return cuda_ver >= (10, 0) except RuntimeError: return False @@ -400,7 +386,4 @@ def have_bf16(compute_version): compute capability of a GPU (e.g. 
"8.0") """ major, _ = parse_compute_version(compute_version) - if major >= 8: - return True - - return False + return major >= 8 diff --git a/python/tvm/contrib/peak.py b/python/tvm/contrib/peak.py index 48d0d31a45..54577d560b 100644 --- a/python/tvm/contrib/peak.py +++ b/python/tvm/contrib/peak.py @@ -93,7 +93,7 @@ def measure_bandwidth_sum( n //= lanes base_type = str(base_type) + str(bits) - dtype = base_type if lanes == 1 else base_type + "x" + str(lanes) + dtype = base_type if lanes == 1 else f"{base_type}x{str(lanes)}" k = te.reduce_axis((0, m), name="k") @@ -235,7 +235,7 @@ def measure_compute_mad( max_threads = target.max_num_threads base_type = str(base_type) + str(bits) - dtype = base_type if lanes == 1 else base_type + "x" + str(lanes) + dtype = base_type if lanes == 1 else f"{base_type}x{str(lanes)}" def extern(ins, outs): # pylint: disable=unused-argument diff --git a/python/tvm/contrib/pickle_memoize.py b/python/tvm/contrib/pickle_memoize.py index d875046038..9a30f35470 100644 --- a/python/tvm/contrib/pickle_memoize.py +++ b/python/tvm/contrib/pickle_memoize.py @@ -62,7 +62,7 @@ def __init__(self, key, save_at_exit): def save(self): if self.dirty: - print("Save memoize result to %s" % self.path) + print(f"Save memoize result to {self.path}") with open(self.path, "wb") as out_file: pickle.dump(self.cache, out_file, pickle.HIGHEST_PROTOCOL) @@ -94,7 +94,7 @@ def memoize(key, save_at_exit=False): def _register(f): """Registration function""" allow_types = (string_types, int, float, tuple) - fkey = key + "." + f.__name__ + ".pkl" + fkey = f"{key}.{f.__name__}.pkl" if fkey not in Cache.cache_by_key: Cache.cache_by_key[fkey] = Cache(fkey, save_at_exit) cache = Cache.cache_by_key[fkey] diff --git a/python/tvm/contrib/pipeline_executor.py b/python/tvm/contrib/pipeline_executor.py index b614630737..75a86353d2 100644 --- a/python/tvm/contrib/pipeline_executor.py +++ b/python/tvm/contrib/pipeline_executor.py @@ -297,24 +297,25 @@ def export_library(self, directory_path): if not os.path.exists(directory_path): raise RuntimeError("The directory {directory_path} does not exist.") # Create an load configuration. - load_config_file_name = "{}/load_config".format(directory_path) - pipeline_config_file_name = "{}/pipeline_config".format(directory_path) - config = {} - config["load_config"] = load_config_file_name - config["pipeline_config"] = pipeline_config_file_name + load_config_file_name = f"{directory_path}/load_config" + pipeline_config_file_name = f"{directory_path}/pipeline_config" + config = { + "load_config": load_config_file_name, + "pipeline_config": pipeline_config_file_name, + } + load_config = [] # Export the library, JSON, and parameter into files, then export these files path # into a configuration file. 
for lib_index in self.pipeline_mods: - mconfig = {} - mconfig["mod_idx"] = lib_index - mconfig["lib_name"] = "{}/lib{}.so".format(directory_path, lib_index) - mconfig["json_name"] = "{}/json{}".format(directory_path, lib_index) - mconfig["params_name"] = "{}/params{}".format(directory_path, lib_index) - mconfig["dev"] = "{},{}".format( - self.pipeline_mods[lib_index]["dev"].device_type, - self.pipeline_mods[lib_index]["dev"].device_id, - ) + mconfig = { + "mod_idx": lib_index, + "lib_name": f"{directory_path}/lib{lib_index}.so", + "json_name": f"{directory_path}/json{lib_index}", + "params_name": f"{directory_path}/params{lib_index}", + "dev": f'{self.pipeline_mods[lib_index]["dev"].device_type},{self.pipeline_mods[lib_index]["dev"].device_id}', + } + # Get the graph, lib, and parameters from GraphExecutorFactoryModule. lib = self.pipeline_mods[lib_index]["lib"] # Export the lib, graph, and parameters to disk. @@ -338,7 +339,7 @@ def export_library(self, directory_path): with open(pipeline_config_file_name, "w") as file_handle: json.dump(self.mods_config, file_handle) - config_file_name = "{}/config".format(directory_path) + config_file_name = f"{directory_path}/config" with open(config_file_name, "w") as file_handle: json.dump(config, file_handle) diff --git a/python/tvm/contrib/pipeline_executor_build.py b/python/tvm/contrib/pipeline_executor_build.py index 324383ab7c..6f6bcb24a1 100644 --- a/python/tvm/contrib/pipeline_executor_build.py +++ b/python/tvm/contrib/pipeline_executor_build.py @@ -82,7 +82,7 @@ def build(pipe_configs): mod_name=mod_config["mod_name"], ) - pipe_config["dev"] = "{},{}".format(dev.device_type, dev.device_id) + pipe_config["dev"] = f"{dev.device_type},{dev.device_id}" # Use "mod_idx" as the key to create a "module_connection" map which is not only # for the module index but also for the module connection used to build the pipeline. module_string_config[mod_idx] = pipe_config @@ -97,10 +97,11 @@ def build(pipe_configs): # "module_connection" information. The "input_connection" is used to record the # map of global input and subgraph input, and the "module_connection" is used to # record module dependency. - string_config = {} - string_config["param_connection"] = config["param_connection"] - string_config["input_connection"] = config["input_connection"] - string_config["module_connection"] = module_string_config + string_config = { + "param_connection": config["param_connection"], + "input_connection": config["input_connection"], + "module_connection": module_string_config, + } return PipelineExecutorFactoryModule(libs, string_config) @@ -122,22 +123,29 @@ def export_library(factory, directory_path): if not directory_path or not os.path.exists(directory_path): raise RuntimeError("The directory {directory_path} does not exist.") # Create an load configuration. - load_config_file_name = "{}/load_config".format(directory_path) - pipeline_config_file_name = "{}/pipeline_config".format(directory_path) - config = {} - config["load_config"] = load_config_file_name - config["pipeline_config"] = pipeline_config_file_name + load_config_file_name = f"{directory_path}/load_config" + pipeline_config_file_name = f"{directory_path}/pipeline_config" + config = { + "load_config": load_config_file_name, + "pipeline_config": pipeline_config_file_name, + } + load_config = [] # Export the library, JSON, and parameter into files, then export these files path # into a configuration file. 
for lib_index in factory.pipeline_mods: - mconfig = {} - mconfig["mod_idx"] = lib_index - mconfig["lib_name"] = "{}/lib{}.so".format(directory_path, lib_index) - mconfig["json_name"] = "{}/json{}".format(directory_path, lib_index) - mconfig["params_name"] = "{}/params{}".format(directory_path, lib_index) + mconfig = { + "mod_idx": lib_index, + "lib_name": f"{directory_path}/lib{lib_index}.so", + "json_name": f"{directory_path}/json{lib_index}", + "params_name": f"{directory_path}/params{lib_index}", + } + lib_config = factory.pipeline_mods[lib_index] - mconfig["dev"] = "{},{}".format(lib_config["dev"].device_type, lib_config["dev"].device_id) + mconfig[ + "dev" + ] = f'{lib_config["dev"].device_type},{lib_config["dev"].device_id}' + fcompile = lib_config["fcompile"] if not fcompile: fcompile = False @@ -159,7 +167,7 @@ def export_library(factory, directory_path): with open(pipeline_config_file_name, "w") as file_handle: json.dump(factory.mods_config, file_handle) - config_file_name = "{}/config".format(directory_path) + config_file_name = f"{directory_path}/config" with open(config_file_name, "w") as file_handle: json.dump(config, file_handle) @@ -228,7 +236,7 @@ def is_pipeline_executor_interface(self): def __repr__(self): # Geting the binding information in the form of text. - str_format = " |{}: ".format(self.name) + str_format = f" |{self.name}: " for binding in self.bindings: mname, dname = binding.get_name() str_format += "{0}:{1} ".format(mname, dname) @@ -245,7 +253,7 @@ def check_binding_dict(self, connection_dict): if "interface_name" not in connection_dict: raise RuntimeError('"inteface_name" is missing in global config!"') if "connection" not in connection_dict: - raise RuntimeError(f'"connection" is missing!"') + raise RuntimeError('"connection" is missing!"') # The global interface mapping should be one-to-one. if not connection_dict["connection"]: raise RuntimeError("The global interface map is empty!") @@ -354,10 +362,10 @@ def connect(self, binding): and self.data_type != binding.data_type ): raise RuntimeError( - f"Illegal type (%s vs. %s): binding type is not same!" - % (self.data_type, binding.data_type) + f"Illegal type ({self.data_type} vs. {binding.data_type}): binding type is not same!" ) + binding.parents.append(self) # Do acyclic check after increasing the in-degree of child node by setting @@ -465,7 +473,7 @@ def get_data_type(self, key, interface_type): if param.name_hint == key: return param._checked_type_ - if interface_type == "output": + elif interface_type == "output": if isinstance(self.output_type, tvm.ir.type.TupleType): if int(key) < len(self.output_type.fields): return self.output_type.fields[int(key)] @@ -477,13 +485,13 @@ def get_data_type(self, key, interface_type): def set_idx_name(self, idx): # Set the index value and generate the module name. self.idx = idx - self.name = "mod{}".format(str(idx)) + self.name = f"mod{str(idx)}" def is_root_mod(self): """Check whether this node is the root node in DAG, this function is used in topological sort. 
""" - return all([not b.parents for b in self.input_bindings.bindings.values()]) + return all(not b.parents for b in self.input_bindings.bindings.values()) def remove_self_from_bindings(self): """Remove the current node from child dependencies to reduce the in-degree @@ -524,7 +532,7 @@ def __str__(self): for interface in self.mod_wrapper[mod].output_bindings.bindings.values(): if interface.bindings: mname, dname = interface.get_name() - iname = mname + ".output(" + dname + ")->" + iname = f"{mname}.output({dname})->" for dep in interface.bindings: dep_mname, dep_dname = dep.get_name() if isinstance(dep.io_owner, PipelineConfig.ModuleWrapper): @@ -565,16 +573,12 @@ def get_config(self): # Use topological sort to get the correct order of modules. self.dag_topology_sort() - mconfig = {} module_connection = {} for mod in self.mod_wrapper: - # Generate pipeline configuration. - mconf = {} output_conf = [] module = self.mod_wrapper[mod] for _, binding in module.output_bindings.bindings.items(): dep_conf = [] - output = {} if binding.bindings: for dep in binding.bindings: dep_item = {} @@ -586,14 +590,14 @@ def get_config(self): dep_item["input_name"] = dname dep_conf.append(dep_item) - # The value of ouput_idx start from 0. - output["output_idx"] = int(binding.name) - output["dependencies"] = dep_conf + output = {"output_idx": int(binding.name), "dependencies": dep_conf} output_conf.append(output) - mconf["mod_idx"] = module.idx - mconf["cpu_affinity"] = module.cpu_affinity - mconf["output"] = output_conf + mconf = { + "mod_idx": module.idx, + "cpu_affinity": module.cpu_affinity, + "output": output_conf, + } module_connection[mod] = { "pipeline": mconf, @@ -631,10 +635,11 @@ def get_config(self): } param_connection.append(param_map) - mconfig["module_connection"] = module_connection - mconfig["input_connection"] = input_connection - mconfig["param_connection"] = param_connection - return mconfig + return { + "module_connection": module_connection, + "input_connection": input_connection, + "param_connection": param_connection, + } def dag_topology_sort(self): """Use topological sort to get order of pipeline modules.""" diff --git a/python/tvm/contrib/popen_pool.py b/python/tvm/contrib/popen_pool.py index 300bb25321..934e9030f6 100644 --- a/python/tvm/contrib/popen_pool.py +++ b/python/tvm/contrib/popen_pool.py @@ -122,30 +122,31 @@ def kill(self): ---- The worker can start a new process when send is called again. 
""" - if self._proc is not None: - # allow gracefully shutdown - try: - self._writer.close() - except IOError: - pass - try: - self._reader.close() - except IOError: - pass - # kill all child processes recursively - try: - kill_child_processes(self._proc.pid) - except TypeError: - pass - try: - self._proc.kill() - except OSError: - pass + if self._proc is None: + return + # allow gracefully shutdown + try: + self._writer.close() + except IOError: + pass + try: + self._reader.close() + except IOError: + pass + # kill all child processes recursively + try: + kill_child_processes(self._proc.pid) + except TypeError: + pass + try: + self._proc.kill() + except OSError: + pass - # Join the child process to avoid zombie processes - self.join(timeout=1.0) - self._proc = None - self._remaining_uses = None + # Join the child process to avoid zombie processes + self.join(timeout=1.0) + self._proc = None + self._remaining_uses = None def _start(self): """Start a new subprocess if nothing is available""" @@ -194,9 +195,7 @@ def join(self, timeout=None): def is_alive(self): """Check if the process is alive""" - if self._proc: - return self._proc.poll() is None - return False + return self._proc.poll() is None if self._proc else False def send(self, fn, args=(), kwargs=None, timeout=None): """Send a new function task fn(*args, **kwargs) to the subprocess. @@ -238,7 +237,7 @@ def send(self, fn, args=(), kwargs=None, timeout=None): # N.B. The initializer doesn't count as a "use" self._remaining_uses = self._maximum_uses - kwargs = {} if not kwargs else kwargs + kwargs = kwargs or {} data = cloudpickle.dumps((fn, args, kwargs, timeout), protocol=pickle.HIGHEST_PROTOCOL) try: self._writer.write(struct.pack("= 3.9 + p = join(rocdl_dir, f"{n}.bc") if not exists(p): # rocm <= 3.8 - p = join(rocdl_dir, n + ".amdgcn.bc") + p = join(rocdl_dir, f"{n}.amdgcn.bc") if exists(p): bitcode_files.append(p) elif "isa_version" not in n and n not in {"irif"}: - raise RuntimeError("could not find bitcode " + n) + raise RuntimeError(f"could not find bitcode {n}") return tvm.runtime.convert(bitcode_files) diff --git a/python/tvm/contrib/sdaccel.py b/python/tvm/contrib/sdaccel.py index 930752c2bc..10e3df108f 100644 --- a/python/tvm/contrib/sdaccel.py +++ b/python/tvm/contrib/sdaccel.py @@ -65,8 +65,8 @@ def compile_vhls(kernel_info, device_name): funcname = funcname.value code = code.value - tmp_cpp = tmp_dir.relpath(funcname + ".cpp") - tmp_xo = tmp_dir.relpath(funcname + ".xo") + tmp_cpp = tmp_dir.relpath(f"{funcname}.cpp") + tmp_xo = tmp_dir.relpath(f"{funcname}.xo") with open(tmp_cpp, "wb") as out_file: out_file.write(bytes(code)) diff --git a/python/tvm/contrib/sparse.py b/python/tvm/contrib/sparse.py index d515f58f9d..ba689b167f 100644 --- a/python/tvm/contrib/sparse.py +++ b/python/tvm/contrib/sparse.py @@ -73,13 +73,12 @@ def __init__(self, arg1, device=None, shape=None): assert self.shape is not None assert isinstance(self.data, _nd.NDArray) assert isinstance(self.indices, _nd.NDArray) - assert str(self.indices.dtype) == "int32" or str(self.indices.dtype) == "int64", str( + assert str(self.indices.dtype) in {"int32", "int64"}, str( self.indices.dtype ) + assert isinstance(self.indptr, _nd.NDArray) - assert str(self.indptr.dtype) == "int32" or str(self.indptr.dtype) == "int64", str( - self.indptr.dtype - ) + assert str(self.indptr.dtype) in {"int32", "int64"}, str(self.indptr.dtype) def asnumpy(self): """Construct a full matrix and convert it to numpy array. 
This API will be deprecated @@ -106,7 +105,7 @@ def array(source_array, device=None, shape=None, stype="csr"): if stype == "csr": ret = CSRNDArray(source_array, shape=shape, device=device) else: - raise NotImplementedError("stype=%s is not supported yet." % (stype,)) + raise NotImplementedError(f"stype={stype} is not supported yet.") return ret @@ -159,9 +158,15 @@ def __init__(self, shape, nonzeros, dtype, name): """ SparsePlaceholderOp.__init__(self, shape, nonzeros, dtype, name) self.stype = "csr" - self.data = te.placeholder((nonzeros,), dtype=dtype, name=self.name + "_data") - self.indices = te.placeholder((nonzeros,), dtype=itype, name=self.name + "_indices") - self.indptr = te.placeholder((self.shape[0] + 1,), dtype=itype, name=self.name + "_indptr") + self.data = te.placeholder((nonzeros,), dtype=dtype, name=f"{self.name}_data") + self.indices = te.placeholder( + (nonzeros,), dtype=itype, name=f"{self.name}_indices" + ) + + self.indptr = te.placeholder( + (self.shape[0] + 1,), dtype=itype, name=f"{self.name}_indptr" + ) + assert isinstance(self.data, _tensor.Tensor) assert isinstance(self.indices, _tensor.Tensor) assert isinstance(self.indptr, _tensor.Tensor) @@ -200,5 +205,5 @@ def placeholder(shape, nonzeros=None, dtype=None, name="placeholder", stype=None if stype == "csr": ret = CSRPlaceholderOp(shape=shape, nonzeros=nonzeros, dtype=dtype, name=name) else: - raise NotImplementedError("stype=%s is not supported yet." % (stype,)) + raise NotImplementedError(f"stype={stype} is not supported yet.") return ret diff --git a/python/tvm/contrib/tar.py b/python/tvm/contrib/tar.py index 354887730f..5377485f04 100644 --- a/python/tvm/contrib/tar.py +++ b/python/tvm/contrib/tar.py @@ -43,7 +43,7 @@ def tar(output, files): for fname in files: base = os.path.basename(fname) if base in fset: - raise ValueError("duplicate file name %s" % base) + raise ValueError(f"duplicate file name {base}") fset.add(base) shutil.copy(fname, temp.relpath(base)) cmd += [output] diff --git a/python/tvm/contrib/tedd.py b/python/tvm/contrib/tedd.py index a65f5e474a..894756f3ab 100644 --- a/python/tvm/contrib/tedd.py +++ b/python/tvm/contrib/tedd.py @@ -55,7 +55,7 @@ def dom_path_to_string(dom_path, prefix=""): path_string = prefix for index in dom_path: - path_string = path_string + "_" + str(index) + path_string = f"{path_string}_{str(index)}" return path_string @@ -123,18 +123,21 @@ def get_or_create_dot_id(obj, prefix="", assert_on_missing=False): get_or_create_dot_id.obj_id_dict = {} if obj not in get_or_create_dot_id.obj_id_dict: if assert_on_missing: - assert False, "dot_id " + str(obj) + " has not been registered." + assert False, f"dot_id {str(obj)} has not been registered." 
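# Illustrative sketch, not part of the patch: two set-based checks from the
# hunks above in isolation; dtype validation by membership in a literal set
# (sparse.py) and duplicate-basename detection as in tar(). File names are
# hypothetical.
import os

assert "int32" in {"int32", "int64"}

def check_unique_basenames(files):
    seen = set()
    for fname in files:
        base = os.path.basename(fname)
        if base in seen:
            raise ValueError(f"duplicate file name {base}")
        seen.add(base)

check_unique_basenames(["a/x.o", "b/y.o"])  # distinct basenames pass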
else: get_or_create_dot_id.obj_id_dict[obj] = prefix + hex(id(obj)) return get_or_create_dot_id.obj_id_dict[obj] def get_port_id(is_input, index): - return "I_" + str(index) if is_input else "O_" + str(index) + return f"I_{str(index)}" if is_input else f"O_{str(index)}" def get_itervar_type_info(iter_type): - assert iter_type < len(ITERVAR_TYPE_STRING_MAP), "Unknown IterVar type: " + str(iter_type) + assert iter_type < len( + ITERVAR_TYPE_STRING_MAP + ), f"Unknown IterVar type: {str(iter_type)}" + return ITERVAR_TYPE_STRING_MAP[iter_type] @@ -222,20 +225,17 @@ def dump_graph(dot_string, show_svg=True, dot_file_path="", output_dot_string=Fa """Output dot_string in various formats.""" if dot_file_path: try: - dot_file = open(dot_file_path, "w+") - dot_file.write(dot_string) - dot_file.close() + with open(dot_file_path, "w+") as dot_file: + dot_file.write(dot_string) except IOError: - print("Cannot open file: " + dot_file_path) + print(f"Cannot open file: {dot_file_path}") if show_svg: from IPython.display import display from IPython.display import SVG src = Source(dot_string) display(SVG(src.pipe(format="svg"))) - if output_dot_string: - return dot_string - return None + return dot_string if output_dot_string else None def dump_json(sch, need_range): @@ -266,7 +266,12 @@ def encode_itervar(itervar, stage, index, range_map): if attr.tensor_intrin is not None: tensor_intrin = str(attr.tensor_intrin.body) # remove the final \n - tensor_intrin = tensor_intrin[0:-1] if tensor_intrin[-1] == "\n" else tensor_intrin + tensor_intrin = ( + tensor_intrin[:-1] + if tensor_intrin[-1] == "\n" + else tensor_intrin + ) + else: tensor_intrin = None else: @@ -626,25 +631,25 @@ def itervar_relation_dot(g, node, node_id): parent = dom_path_to_string(node["parent"], "IterVar") outer = dom_path_to_string(node["outer"], "IterVar") inner = dom_path_to_string(node["inner"], "IterVar") - g.edge(parent + ":itervar", node_id + ":Input") - g.edge(node_id + ":Outer", outer + ":itervar") - g.edge(node_id + ":Inner", inner + ":itervar") + g.edge(f"{parent}:itervar", f"{node_id}:Input") + g.edge(f"{node_id}:Outer", f"{outer}:itervar") + g.edge(f"{node_id}:Inner", f"{inner}:itervar") elif node_type == "Fuse_Relation": node_type = "Fuse" itervar_relation_node_dot(g, node_id, node_type, ["Outer", "Inner"], ["Fused"]) fused = dom_path_to_string(node["fused"], "IterVar") outer = dom_path_to_string(node["outer"], "IterVar") inner = dom_path_to_string(node["inner"], "IterVar") - g.edge(outer + ":itervar", node_id + ":Outer") - g.edge(inner + ":itervar", node_id + ":Inner") - g.edge(node_id + ":Fused", fused + ":itervar") + g.edge(f"{outer}:itervar", f"{node_id}:Outer") + g.edge(f"{inner}:itervar", f"{node_id}:Inner") + g.edge(f"{node_id}:Fused", f"{fused}:itervar") elif node_type == "Singleton_Relation": node_type = "Singleton" itervar_relation_node_dot(g, node_id, node_type, [], ["Iter"]) itervar = dom_path_to_string(node["inner"], "IterVar") - g.edge(node_id + ":Iter", itervar + ":itervar") + g.edge(f"{node_id}:Iter", f"{itervar}:itervar") else: - assert False, "Unknown IterVarRelationNode: " + node_type + assert False, f"Unknown IterVarRelationNode: {node_type}" def stage_node_dot(g, stage): """Create a stage node.""" diff --git a/python/tvm/contrib/thrust.py b/python/tvm/contrib/thrust.py index 7fe0077c2b..5cd00718d1 100644 --- a/python/tvm/contrib/thrust.py +++ b/python/tvm/contrib/thrust.py @@ -21,7 +21,10 @@ def maybe_warn(target, func_name): - if get_global_func(func_name, allow_missing=True) and not "thrust" in 
target.libs: + if ( + get_global_func(func_name, allow_missing=True) + and "thrust" not in target.libs + ): logging.warning("TVM is built with thrust but thrust is not used.") if "thrust" in target.libs and get_global_func(func_name, allow_missing=True) is None: logging.warning("thrust is requested but TVM is not built with thrust.") diff --git a/python/tvm/contrib/utils.py b/python/tvm/contrib/utils.py index 89688b5bf8..a02d85d71d 100644 --- a/python/tvm/contrib/utils.py +++ b/python/tvm/contrib/utils.py @@ -105,13 +105,12 @@ def __init__(self, custom_path=None, keep_for_debug=None): if custom_path: os.mkdir(custom_path) self.temp_dir = custom_path + elif self._created_with_keep_for_debug: + parent_dir = self._get_debug_parent_dir() + self.temp_dir = f"{parent_dir}/{self._increment_num_tempdir_created():05d}" + os.mkdir(self.temp_dir) else: - if self._created_with_keep_for_debug: - parent_dir = self._get_debug_parent_dir() - self.temp_dir = f"{parent_dir}/{self._increment_num_tempdir_created():05d}" - os.mkdir(self.temp_dir) - else: - self.temp_dir = tempfile.mkdtemp() + self.temp_dir = tempfile.mkdtemp() if not self._created_with_keep_for_debug: self.TEMPDIRS.add(self.temp_dir) diff --git a/python/tvm/contrib/xcode.py b/python/tvm/contrib/xcode.py index 6d5e10f611..c9503161cb 100644 --- a/python/tvm/contrib/xcode.py +++ b/python/tvm/contrib/xcode.py @@ -50,15 +50,13 @@ def __get_min_os_version(sdk): return None if sdk == "iphoneos": return "13.0" - raise RuntimeError("Unsupported sdk: %s" % sdk) + raise RuntimeError(f"Unsupported sdk: {sdk}") def __get_min_os_version_cmd(sdk, min_os_version): if min_os_version is None: min_os_version = __get_min_os_version(sdk) - if min_os_version is not None: - return "-mios-version-min=" + min_os_version - return "" + return "" if min_os_version is None else f"-mios-version-min={min_os_version}" def create_dylib(output, objects, arch, sdk="macosx", min_os_version=None): @@ -89,11 +87,7 @@ def create_dylib(output, objects, arch, sdk="macosx", min_os_version=None): cmd += ["-isysroot", sdk_path] cmd += [__get_min_os_version_cmd(sdk, min_os_version)] cmd += ["-o", output] - if isinstance(objects, str): - cmd += [objects] - else: - cmd += objects - + cmd += [objects] if isinstance(objects, str) else objects proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() @@ -133,7 +127,7 @@ def compile_metal(code, path_target=None, sdk="macosx", min_os_version=None): with open(temp_code, "w") as out_file: out_file.write(code) - file_target = path_target if path_target else temp_target + file_target = path_target or temp_target # See: # - https://developer.apple.com/documentation/metal/gpu_functions_libraries/building_a_library_with_metal_s_command-line_tools#overview # pylint: disable=line-too-long @@ -146,7 +140,7 @@ def compile_metal(code, path_target=None, sdk="macosx", min_os_version=None): elif sdk in ("iphoneos", "iphonesimulator"): language_version = "-std=ios-metal2.3" else: - raise RuntimeError("Unsupported sdk: %s" % sdk) + raise RuntimeError(f"Unsupported sdk: {sdk}") cmd1 = ["xcrun", "-sdk", sdk, "metal", language_version, min_target, "-O3"] cmd1 += ["-c", temp_code, "-o", temp_ir] cmd2 = ["xcrun", "-sdk", sdk, "metallib"] @@ -158,20 +152,18 @@ def compile_metal(code, path_target=None, sdk="macosx", min_os_version=None): stderr=subprocess.STDOUT, ) (out, _) = proc.communicate() - if proc.returncode != 0: - sys.stderr.write("Compilation error:\n") - sys.stderr.write(py_str(out)) - sys.stderr.flush() - 
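# Illustrative sketch, not part of the patch: two patterns from the hunks
# above. dump_graph() now writes through a context manager, which closes the
# handle even if the write raises, and maybe_warn() uses the clearer PEP 8
# "x not in y" spelling. The path below is hypothetical.
import os
import tempfile

dot_path = os.path.join(tempfile.mkdtemp(), "graph.dot")
with open(dot_path, "w") as dot_file:
    dot_file.write("digraph {}")

libs = ["cublas"]
assert ("thrust" not in libs) == (not "thrust" in libs)  # identical semantics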
libbin = None - else: - libbin = bytearray(open(file_target, "rb").read()) - return libbin + if proc.returncode == 0: + return bytearray(open(file_target, "rb").read()) + sys.stderr.write("Compilation error:\n") + sys.stderr.write(py_str(out)) + sys.stderr.flush() + return None def compile_coreml(model, model_name="main", out_dir="."): """Compile coreml model and return the compiled model path.""" - mlmodel_path = os.path.join(out_dir, model_name + ".mlmodel") - mlmodelc_path = os.path.join(out_dir, model_name + ".mlmodelc") + mlmodel_path = os.path.join(out_dir, f"{model_name}.mlmodel") + mlmodelc_path = os.path.join(out_dir, f"{model_name}.mlmodelc") metadata = {"inputs": list(model.input_description), "outputs": list(model.output_description)} # Use the description field to send info to CoreML runtime model.short_description = json.dumps(metadata) @@ -179,6 +171,6 @@ def compile_coreml(model, model_name="main", out_dir="."): res = xcrun(["coremlcompiler", "compile", mlmodel_path, out_dir]) if not os.path.isdir(mlmodelc_path): - raise RuntimeError("Compile failed: %s" % res) + raise RuntimeError(f"Compile failed: {res}") return mlmodelc_path diff --git a/python/tvm/support.py b/python/tvm/support.py index ccd6f59e32..eede61f70d 100644 --- a/python/tvm/support.py +++ b/python/tvm/support.py @@ -36,20 +36,17 @@ def libinfo(): The dictionary of compile-time info. """ get_lib_info_func = get_global_func("support.GetLibInfo", allow_missing=True) - if get_lib_info_func is not None: - lib_info = get_lib_info_func() - if lib_info is None: - return {} - else: + if get_lib_info_func is None: return {} - return dict(lib_info.items()) + lib_info = get_lib_info_func() + return {} if lib_info is None else dict(lib_info.items()) def describe(): """ Print out information about TVM and the current Python environment """ - info = list((k, v) for k, v in libinfo().items()) + info = list(libinfo().items()) info = dict(sorted(info, key=lambda x: x[0])) print("Python Environment") sys_version = sys.version.replace("\n", " ") diff --git a/version.py b/version.py index b739d82b73..d1d45873ed 100644 --- a/version.py +++ b/version.py @@ -133,8 +133,8 @@ def git_describe_version(): else: dev_version = arr_info[0] - pub_ver = "%s.dev%s" % (dev_version, arr_info[1]) - local_ver = "%s+%s" % (pub_ver, arr_info[2]) + pub_ver = f"{dev_version}.dev{arr_info[1]}" + local_ver = f"{pub_ver}+{arr_info[2]}" return pub_ver, local_ver @@ -145,20 +145,19 @@ def update(file_name, pattern, repl, dry_run=False): need_update = False with open(file_name) as file: for l in file: - result = re.findall(pattern, l) - if result: + if result := re.findall(pattern, l): assert len(result) == 1 hit_counter += 1 if result[0] != repl: l = re.sub(pattern, repl, l) need_update = True - print("%s: %s -> %s" % (file_name, result[0], repl)) + print(f"{file_name}: {result[0]} -> {repl}") else: - print("%s: version is already %s" % (file_name, repl)) + print(f"{file_name}: version is already {repl}") update.append(l) if hit_counter != 1: - raise RuntimeError("Cannot find version in %s" % file_name) + raise RuntimeError(f"Cannot find version in {file_name}") if need_update and not dry_run: with open(file_name, "w") as output_file: @@ -194,7 +193,12 @@ def sync_version(pub_ver, local_ver, dry_run): # web # change to pre-release convention by npm dev_pos = pub_ver.find(".dev") - npm_ver = pub_ver if dev_pos == -1 else "%s.0-%s" % (pub_ver[:dev_pos], pub_ver[dev_pos + 1 :]) + npm_ver = ( + pub_ver + if dev_pos == -1 + else 
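# Illustrative sketch, not part of the patch: the walrus operator in update()
# above binds and tests the re.findall() result in one expression
# (Python 3.8+). Toy pattern and input lines:
import re

hits = []
for line in ["version = 0.9", "name = tvm"]:
    if result := re.findall(r"\d+\.\d+", line):
        hits.append(result[0])
assert hits == ["0.9"]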
f"{pub_ver[:dev_pos]}.0-{pub_ver[dev_pos + 1:]}" + ) + update( os.path.join(PROJ_ROOT, "web", "package.json"), r'(?<="version": ")[.0-9a-z\-\+]+',