From f87f1d08e83a31cae7a3e6680c7e03b82a1b1710 Mon Sep 17 00:00:00 2001
From: Mike Iovine
Date: Thu, 9 Dec 2021 16:59:58 -0800
Subject: [PATCH] [SR] assignStorageToManagedTensors returns a vector (#69568)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/69568

Non-empty vectors should never be passed to `assignStorageToManagedTensors`
and `assignStorageToOutputTensors`. Presumably, the out-variant convention
was adopted to avoid move-assigning the corresponding attributes in
`MemoryPlanner`. But the cost of a vector move-assign is low, and returning
the vector makes the function signature safer.

Test Plan: `buck test caffe2/benchmarks/static_runtime:static_runtime_cpptest`

Reviewed By: donaldong

Differential Revision: D32729289

fbshipit-source-id: 88f19de8eb89d8a4f1dd8bbd4d9e7f686e41888b
---
 .../static_runtime/test_static_module.cc     |  5 ++--
 .../jit/runtime/static/memory_planner.cpp    | 25 +++++++++----------
 .../csrc/jit/runtime/static/memory_planner.h |  5 ++--
 3 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/benchmarks/static_runtime/test_static_module.cc b/benchmarks/static_runtime/test_static_module.cc
index 2d8ae40abd697..38290e3761062 100644
--- a/benchmarks/static_runtime/test_static_module.cc
+++ b/benchmarks/static_runtime/test_static_module.cc
@@ -1230,9 +1230,8 @@ void testAssignStorageToManagedTensors(
   ASSERT_EQ(managed_tensor_values.size(), tensor_value_to_tensor.size());
   auto ranges = ManagedTensorRanges(graph, managed_tensor_values);
 
-  std::vector<StorageGroup> groups;
-  assignStorageToManagedTensors(
-      graph->block()->nodes(), ranges, tensor_value_to_tensor, groups);
+  auto groups = assignStorageToManagedTensors(
+      graph->block()->nodes(), ranges, tensor_value_to_tensor);
 
   checkStorageGroups(
       groups, ranges, tensor_value_to_tensor, min_reused_tensors);
diff --git a/torch/csrc/jit/runtime/static/memory_planner.cpp b/torch/csrc/jit/runtime/static/memory_planner.cpp
index 90267bb642df4..c44a70f6b491f 100644
--- a/torch/csrc/jit/runtime/static/memory_planner.cpp
+++ b/torch/csrc/jit/runtime/static/memory_planner.cpp
@@ -56,11 +56,11 @@ FastMap<const Value*, at::Tensor*> tensorValueToTensor(
 
 } // namespace
 
-void assignStorageToManagedTensors(
+std::vector<StorageGroup> assignStorageToManagedTensors(
     graph_node_list nodes,
     const ManagedTensorRanges& ranges,
-    const FastMap<const Value*, at::Tensor*>& tensor_value_to_tensor,
-    std::vector<StorageGroup>& managed_tensor_groups) {
+    const FastMap<const Value*, at::Tensor*>& tensor_value_to_tensor) {
+  std::vector<StorageGroup> managed_tensor_groups;
   // This set maps each Value* to its assigned storage group.
   FastMap<const Value*, size_t> storage_group_mapping;
   // On each iteration, this vector stores the set of storage groups that
@@ -119,6 +119,7 @@ void assignStorageToManagedTensors(
       }
     }
   }
+  return managed_tensor_groups;
 }
 
 namespace {
@@ -127,10 +128,10 @@ bool setIncludes(const FastSet<const Value*>& set, const Value* v) {
   return set.find(v) != set.end();
 }
 
-void assignStorageToOutputTensors(
+std::vector<std::pair<size_t, at::Tensor*>> assignStorageToOutputTensors(
     StaticRuntime* runtime,
-    const FastSet<const Value*>& managed_output_tensor_values,
-    std::vector<std::pair<size_t, at::Tensor*>>& managed_output_tensors) {
+    const FastSet<const Value*>& managed_output_tensor_values) {
+  std::vector<std::pair<size_t, at::Tensor*>> managed_output_tensors;
   for (auto& pnode : runtime->nodes()) {
     for (const auto i : c10::irange(pnode.outputs().size())) {
       auto& ival = pnode.Output(i);
@@ -144,6 +145,7 @@ void assignStorageToOutputTensors(
       managed_output_tensors.emplace_back(0, tensor);
     }
   }
+  return managed_output_tensors;
 }
 
 } // namespace
@@ -213,11 +215,8 @@ MemoryPlanner::MemoryPlanner(
   const auto tensor_value_to_tensor =
       tensorValueToTensor(runtime->nodes(), managed_tensor_values);
   if (optimize_memory) {
-    ::torch::jit::assignStorageToManagedTensors(
-        runtime->node_ptrs(),
-        ranges,
-        tensor_value_to_tensor,
-        managed_tensors_);
+    managed_tensors_ = assignStorageToManagedTensors(
+        runtime->node_ptrs(), ranges, tensor_value_to_tensor);
   } else {
     for (auto& tensor : tensor_value_to_tensor) {
       managed_tensors_.emplace_back(tensor.second);
@@ -226,8 +225,8 @@ MemoryPlanner::MemoryPlanner(
   }
 
   if (enable_out_variant && manage_output_tensors) {
-    ::torch::jit::assignStorageToOutputTensors(
-        runtime, managed_output_tensor_values, managed_output_tensors_);
+    managed_output_tensors_ =
+        assignStorageToOutputTensors(runtime, managed_output_tensor_values);
   }
 
   num_managed_tensors_ = 0;
diff --git a/torch/csrc/jit/runtime/static/memory_planner.h b/torch/csrc/jit/runtime/static/memory_planner.h
index a81813a7b485f..d4159eb2d1c3b 100644
--- a/torch/csrc/jit/runtime/static/memory_planner.h
+++ b/torch/csrc/jit/runtime/static/memory_planner.h
@@ -39,11 +39,10 @@ class StorageGroup {
   std::vector<at::Tensor*> group_{};
 };
 
-TORCH_API void assignStorageToManagedTensors(
+TORCH_API std::vector<StorageGroup> assignStorageToManagedTensors(
     graph_node_list nodes,
     const ManagedTensorRanges& ranges,
-    const FastMap<const Value*, at::Tensor*>& tensor_value_to_tensor,
-    std::vector<StorageGroup>& managed_tensor_groups);
+    const FastMap<const Value*, at::Tensor*>& tensor_value_to_tensor);
 
 // There are three types of ops in a processed graph in Static Runtime:
 // 1. op with _out variant
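
Note: the convention change above is the standard out-parameter vs. return-by-value
trade-off. The following is a minimal standalone C++ sketch, separate from the patch,
with a stand-in `StorageGroup` struct and hypothetical helper names, showing why the
return-by-value form costs the caller at most one cheap vector move-assign:

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Stand-in for the real StorageGroup; for illustration only.
    struct StorageGroup {
      std::size_t max_bytes = 0;
    };

    // Old convention: the caller must supply an (empty) vector to be filled in.
    void assignGroupsOut(std::vector<StorageGroup>& groups) {
      groups.push_back(StorageGroup{64});
    }

    // New convention: build the vector locally and return it. NRVO or a move
    // applies on return, so the caller pays at most one vector move-assign.
    std::vector<StorageGroup> assignGroups() {
      std::vector<StorageGroup> groups;
      groups.push_back(StorageGroup{64});
      return groups;
    }

    int main() {
      std::vector<StorageGroup> managed;
      assignGroupsOut(managed);   // old: correctness depends on `managed` being empty
      managed = assignGroups();   // new: move-assign replaces the contents safely
      return 0;
    }

The returned vector cannot silently append to a non-empty argument, which is the
safety property the commit message refers to.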