From a53fdaa23f5c0dddc0f5b69cb73bd724c36f5e58 Mon Sep 17 00:00:00 2001
From: Ilia Cherniavskii
Date: Thu, 6 Aug 2020 01:50:26 -0700
Subject: [PATCH] Remove ProfiledType (#42570)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/42570

ProfiledType doesn't do anything and is not used at the moment; removing it.

Test Plan: CI

Reviewed By: ezyang

Differential Revision: D22938664

Pulled By: ilia-cher

fbshipit-source-id: 037c512938028f44258b702bbcde3f8c144f4aa0
---
 BUILD.bazel                                  |  6 --
 c10/core/DispatchKey.cpp                     |  2 -
 c10/core/DispatchKey.h                       |  2 -
 caffe2/CMakeLists.txt                        |  6 --
 test/cpp/jit/test_misc.cpp                   |  7 --
 tools/autograd/gen_variable_type.py          | 90 -------------------
 tools/autograd/templates/ProfiledType.cpp    | 36 --------
 tools/build_variables.bzl                    | 10 ---
 torch/csrc/autograd/profiler.cpp             |  9 --
 .../passes/onnx/unpack_quantized_weights.cpp |  6 --
 10 files changed, 174 deletions(-)
 delete mode 100644 tools/autograd/templates/ProfiledType.cpp

diff --git a/BUILD.bazel b/BUILD.bazel
index d87dd9baae4f44..0e4e65659f0800 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -186,12 +186,6 @@ libtorch_cpp_generated_sources = [
     "torch/csrc/autograd/generated/VariableType_3.cpp",
     "torch/csrc/autograd/generated/VariableType_4.cpp",
     # "torch/csrc/autograd/generated/VariableTypeEverything.cpp",
-    "torch/csrc/autograd/generated/ProfiledType_0.cpp",
-    "torch/csrc/autograd/generated/ProfiledType_1.cpp",
-    "torch/csrc/autograd/generated/ProfiledType_2.cpp",
-    "torch/csrc/autograd/generated/ProfiledType_3.cpp",
-    "torch/csrc/autograd/generated/ProfiledType_4.cpp",
-    # "torch/csrc/autograd/generated/ProfiledTypeEverything.cpp",
     "torch/csrc/autograd/generated/TraceType_0.cpp",
     "torch/csrc/autograd/generated/TraceType_1.cpp",
     "torch/csrc/autograd/generated/TraceType_2.cpp",
diff --git a/c10/core/DispatchKey.cpp b/c10/core/DispatchKey.cpp
index de808337111479..4ce9ecd1bfb2bf 100644
--- a/c10/core/DispatchKey.cpp
+++ b/c10/core/DispatchKey.cpp
@@ -50,8 +50,6 @@ const char* toString(DispatchKey t) {
       return "Autocast";
     case DispatchKey::TESTING_ONLY_GenericWrapper:
       return "TESTING_ONLY_GenericWrapper";
-    case DispatchKey::Profiler:
-      return "Profiler";
     case DispatchKey::Named:
       return "Named";
     case DispatchKey::Tracer:
diff --git a/c10/core/DispatchKey.h b/c10/core/DispatchKey.h
index 5605a078b646c2..1a7c63f11b2ac1 100644
--- a/c10/core/DispatchKey.h
+++ b/c10/core/DispatchKey.h
@@ -189,8 +189,6 @@ enum class DispatchKey : uint8_t {
   // the bulk of this logic.
   Autograd,

-  Profiler,
-
   Tracer,

   // Pre-autograd dispatch keys allow backends to override the autograd behavior
diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index f778dcc6786396..380a4839e91b6b 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -309,11 +309,6 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
     "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_2.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_3.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_4.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_0.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_1.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_2.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_3.cpp"
-    "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_4.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/TraceType_0.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/TraceType_1.cpp"
     "${TORCH_SRC_DIR}/csrc/autograd/generated/TraceType_2.cpp"
@@ -371,7 +366,6 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
     "${CMAKE_BINARY_DIR}/aten/src/ATen/Declarations.yaml"
     "${TOOLS_PATH}/autograd/templates/VariableType.h"
     "${TOOLS_PATH}/autograd/templates/VariableType.cpp"
-    "${TOOLS_PATH}/autograd/templates/ProfiledType.cpp"
     "${TOOLS_PATH}/autograd/templates/TraceType.cpp"
     "${TOOLS_PATH}/autograd/templates/Functions.h"
     "${TOOLS_PATH}/autograd/templates/Functions.cpp"
diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp
index 8ec16818b5155b..aa4d2fb2fb31c5 100644
--- a/test/cpp/jit/test_misc.cpp
+++ b/test/cpp/jit/test_misc.cpp
@@ -821,8 +821,6 @@ void checkScopeCallbacks() {
 }

 void testRecordFunction() {
-  // enable observers
-  c10::impl::IncludeDispatchKeyGuard observer_guard(c10::DispatchKey::Profiler);
   // disabling the inlining of method calls
   GraphOptimizerEnabledGuard opt_guard(false);

@@ -1016,8 +1014,6 @@ void testRecordFunction() {
   ids.clear();

   auto th = std::thread([&ids]() {
-    c10::impl::IncludeDispatchKeyGuard observer_guard(
-        c10::DispatchKey::Profiler);
     addThreadLocalCallback(RecordFunctionCallback(
         [&ids](const RecordFunction& fn) { ids.push_back(2); },
         [](const RecordFunction&) {}));
@@ -1128,9 +1124,6 @@ void checkDebugInfo(c10::DebugInfoKind kind, int model_id) {
 }

 void testThreadLocalDebugInfo() {
-  // enable observers
-  c10::impl::IncludeDispatchKeyGuard observer_guard(c10::DispatchKey::Profiler);
-
   TORCH_CHECK(
       c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::TEST_INFO) == nullptr);
   auto debug_info = std::make_shared();
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index a244c22d52648b..1ab895bbd67dae 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -364,23 +364,6 @@
 ${return_type} ${api_name}(${declaration_formals}); // {"schema": "${schema_string}", "compound": "${compound}"}
 """)

-# TODO(iliacher): remove Profile wrappers
-# ProfiledType templates
-# See NOTE[UnboxedOnly] in function_wrapper.py
-UNBOXED_PROFILE_DISPATCH = CodeTemplate("""\
-static auto op = c10::Dispatcher::singleton()
-    .findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
-    .typed<${return_type} (${profiled_arg_types})>();
-return c10::Dispatcher::singleton().redispatch<${profiled_ret_and_arg_types}>(${profiled_dispatch_args});
-""")
-PROFILE_DISPATCH = CodeTemplate("""\
-static auto op = c10::Dispatcher::singleton()
-    .findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
-    .typed<${return_type} (${profiled_arg_types})>();
-return c10::Dispatcher::singleton().redispatch<${profiled_ret_and_arg_types}>(${profiled_dispatch_args});
-""")
-
-
 # TraceType templates
 # TODO: change `redispatch` to `NoTracerDispatchMode` + regular `call`.
 # See NOTE[UnboxedOnly] in function_wrapper.py
@@ -678,14 +661,11 @@ def gen_variable_type(out, aten_declarations, template_path):
 def gen_variable_type_shard(out, aten_declarations, template_path, suffix, header):
     VARIABLE_TYPE_H = CodeTemplate.from_file(template_path + '/VariableType.h')
     VARIABLE_TYPE_CPP = CodeTemplate.from_file(template_path + '/VariableType.cpp')
-    PROFILED_TYPE_CPP = CodeTemplate.from_file(template_path + '/ProfiledType.cpp')
     TRACE_TYPE_CPP = CodeTemplate.from_file(template_path + '/TraceType.cpp')

     type_declarations = []
     type_definitions = []
     wrapper_registrations = []
-    profiled_method_definitions = []
-    profiled_wrapper_registrations = []
     trace_method_definitions = []
     trace_wrapper_registrations = []

@@ -708,19 +688,6 @@ def gen_variable_type_shard(out, aten_declarations, template_path, suffix, heade
         # See Note [Manual catchAll kernels]
         assert (declaration['name'] in MANUAL_CATCHALL) == declaration['manual_kernel_registration']

-        # Emit ProfiledType code
-        profiled_body = emit_profiled_body(declaration)
-        profiled_method_definitions.append(METHOD_DEFINITION.substitute(
-            declaration, type_definition_body=profiled_body))
-
-        if declaration['use_c10_dispatcher'] == 'full':
-            profiled_wrapper_registrations.append(WRAPPER_REGISTRATION.substitute(
-                declaration, class_type='ProfiledType'))
-        else:
-            assert declaration['use_c10_dispatcher'] == 'with_codegenerated_unboxing_wrapper'
-            profiled_wrapper_registrations.append(UNBOXEDONLY_WRAPPER_REGISTRATION.substitute(
-                declaration, class_type='ProfiledType'))
-
         # Emit TraceType code
         if declaration['name'] not in MANUAL_TRACER:
             trace_body = emit_trace_body(declaration)
@@ -738,8 +705,6 @@ def gen_variable_type_shard(out, aten_declarations, template_path, suffix, heade
         'type_derived_method_declarations': type_declarations,
         'type_derived_method_definitions': type_definitions,
         'wrapper_registrations': wrapper_registrations,
-        'profiled_method_definitions': profiled_method_definitions,
-        'profiled_wrapper_registrations': profiled_wrapper_registrations,
         'trace_method_definitions': trace_method_definitions,
         'trace_wrapper_registrations': trace_wrapper_registrations,
     }
@@ -747,64 +712,9 @@ def gen_variable_type_shard(out, aten_declarations, template_path, suffix, heade
         write(out, 'VariableType.h', VARIABLE_TYPE_H, env)
     else:
         write(out, 'VariableType%s.cpp' % suffix, VARIABLE_TYPE_CPP, env)
-        write(out, 'ProfiledType%s.cpp' % suffix, PROFILED_TYPE_CPP, env)
         write(out, 'TraceType%s.cpp' % suffix, TRACE_TYPE_CPP, env)


-def emit_profiled_body(declaration):
-    arguments = declaration['arguments']
-    returns = declaration['returns']
-    func = declaration['derivative']
-    name = declaration['name']
-    inplace = declaration['inplace']
-    is_out_fn = name.endswith('_out')
-    modifies_arguments = inplace or is_out_fn
-    returns_void = len(returns) == 0
-
-    processed_args = []
-    for a in arguments:
-        processed_args.append('{}'.format(a['name']))
-
-    arg_types = ', '.join([a['type'] for a in declaration['arguments']])
-    ret_and_arg_types = ', '.join([declaration['return_type']] + [a['type'] for a in declaration['arguments']])
-    schema_order_arg_types = ', '.join([a['type'] for a in declaration['schema_order_arguments']])
-    schema_order_ret_and_arg_types = ', '.join(
-        [declaration['return_type']] + [a['type'] for a in declaration['schema_order_arguments']])
-
-    def check_record_function_input_type(simple_type):
-        return simple_type in ['Tensor', 'Scalar']
-
-    def record_function_input_names():
-        return ', '.join([
-            arg['name'] for arg in declaration['arguments']
-            if check_record_function_input_type(arg['simple_type'])])
-
-    profiled_dispatch_args = ['op', 'c10::DispatchKey::Profiler'] + declaration['args']
-    schema_order_profiled_dispatch_args = ['op', 'c10::DispatchKey::Profiler'] + declaration['schema_order_args']
-
-    if declaration['use_c10_dispatcher'] == 'full':
-        profiled_arg_types = schema_order_arg_types
-        profiled_ret_and_arg_types = schema_order_ret_and_arg_types
-        profiled_dispatch_args = schema_order_profiled_dispatch_args
-    else:
-        assert declaration['use_c10_dispatcher'] == 'with_codegenerated_unboxing_wrapper'
-        profiled_arg_types = arg_types
-        profiled_ret_and_arg_types = ret_and_arg_types
-        profiled_dispatch_args = profiled_dispatch_args
-
-    call = PROFILE_DISPATCH.substitute(
-        declaration,
-        name=name,
-        input_names=record_function_input_names(),
-        return_type=declaration['return_type'],
-        profiled_arg_types=profiled_arg_types,
-        profiled_ret_and_arg_types=profiled_ret_and_arg_types,
-        profiled_dispatch_args=profiled_dispatch_args,
-    )
-
-    return [call]
-
-
 def emit_trace_body(declaration):
     returns = declaration['returns']
     name = declaration['name']
diff --git a/tools/autograd/templates/ProfiledType.cpp b/tools/autograd/templates/ProfiledType.cpp
deleted file mode 100644
index 1613d6e36deab6..00000000000000
--- a/tools/autograd/templates/ProfiledType.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include "torch/csrc/autograd/VariableTypeUtils.h"
-
-#include
-#include
-#include
-
-#include "torch/csrc/autograd/function.h"
-
-#include "ATen/quantized/Quantizer.h"
-
-// ${generated_comment}
-
-// NOTE See [Sharded File] comment in VariableType
-
-using namespace at;
-using namespace torch::autograd::generated;
-using torch::autograd::Node;
-
-namespace torch {
-
-namespace ProfiledType {
-
-namespace {
-${profiled_method_definitions}
-} // namespace
-} // namespace ProfiledType
-
-namespace {
-
-TORCH_LIBRARY_IMPL(aten, Profiler, m) {
-  ${profiled_wrapper_registrations};
-}
-
-} // namespace
-
-} // namespace torch
diff --git a/tools/build_variables.bzl b/tools/build_variables.bzl
index 6e13c60533b759..ae22532d9fcf3d 100644
--- a/tools/build_variables.bzl
+++ b/tools/build_variables.bzl
@@ -10,11 +10,6 @@ GENERATED_CPP = [
     "jit/generated/generated_unboxing_wrappers_0.cpp",
    "jit/generated/generated_unboxing_wrappers_1.cpp",
    "jit/generated/generated_unboxing_wrappers_2.cpp",
-    "autograd/generated/ProfiledType_0.cpp",
-    "autograd/generated/ProfiledType_1.cpp",
-    "autograd/generated/ProfiledType_2.cpp",
-    "autograd/generated/ProfiledType_3.cpp",
-    "autograd/generated/ProfiledType_4.cpp",
    "autograd/generated/TraceType_0.cpp",
    "autograd/generated/TraceType_1.cpp",
    "autograd/generated/TraceType_2.cpp",
@@ -38,11 +33,6 @@ def libtorch_generated_sources(gencode_pattern):
    "autograd/generated/VariableType_2.cpp",
    "autograd/generated/VariableType_3.cpp",
    "autograd/generated/VariableType_4.cpp",
-    "autograd/generated/ProfiledType_0.cpp",
-    "autograd/generated/ProfiledType_1.cpp",
-    "autograd/generated/ProfiledType_2.cpp",
-    "autograd/generated/ProfiledType_3.cpp",
-    "autograd/generated/ProfiledType_4.cpp",
    "autograd/generated/TraceType_0.cpp",
    "autograd/generated/TraceType_1.cpp",
    "autograd/generated/TraceType_2.cpp",
diff --git a/torch/csrc/autograd/profiler.cpp b/torch/csrc/autograd/profiler.cpp
index 7ec52cbb52cb65..0c86902dad58a9 100644
--- a/torch/csrc/autograd/profiler.cpp
+++ b/torch/csrc/autograd/profiler.cpp
@@ -663,12 +663,3 @@ void RecordProfile::processEvents(const std::vector& events) {
 }

 }}}
-
-void profile_wrapper(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
-  c10::impl::ExcludeDispatchKeyGuard key_guard(c10::DispatchKey::Profiler);
-  op.callBoxed(stack);
-}
-
-TORCH_LIBRARY_IMPL(_, Profiler, m) {
-  m.fallback(torch::CppFunction::makeFromBoxedFunction<&profile_wrapper>());
-}
diff --git a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
index 4be26c9bc00633..18364bac9aa63d 100644
--- a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
+++ b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
@@ -205,12 +205,6 @@ void unpackQuantizedWeightsHelper(
           .findSchemaOrThrow(unpack_fn.c_str(), "")
           .typed<std::tuple<at::Tensor, c10::optional<at::Tensor>>(
               at::Tensor)>();
-  // Temporary hack: when the `Profiler` dispatch key is inserted, this call
-  // will fail since the `unpack()` ops return multiple values, however the
-  // boxing code currently does not support this. Instead, exclude the
-  // Profiler dispatch key and go through unboxed dispatch, avoiding boxing
-  // altogether
-  c10::impl::ExcludeDispatchKeyGuard key_guard(c10::DispatchKey::Profiler);
   std::tie(unpacked_weight, bias) = op.call(packed_weight);
 }
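
With the Profiler dispatch key gone, there is nothing left to include or exclude via
IncludeDispatchKeyGuard/ExcludeDispatchKeyGuard; profiling observers attach through the
RecordFunction callback API that the updated test/cpp/jit/test_misc.cpp hunks above
already exercise. A minimal sketch of that pattern, assuming the at:: qualification and
the <ATen/record_function.h> header path of this era; installCountingObserver and
g_op_count are hypothetical names, not part of the patch:

#include <ATen/record_function.h> // assumed header location for RecordFunction

#include <atomic>

std::atomic<int> g_op_count{0}; // hypothetical counter bumped by the observer

void installCountingObserver() {
  // Register a thread-local observer, mirroring the addThreadLocalCallback call
  // in the test above: the first lambda runs when an observed op starts on this
  // thread, the second when it finishes (nothing to do there in this sketch).
  at::addThreadLocalCallback(at::RecordFunctionCallback(
      [](const at::RecordFunction&) { ++g_op_count; },
      [](const at::RecordFunction&) {}));
}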