[onert] Apply BackPropInitializer layer (Samsung#12945)
This commit applies the BackPropInitializer layer to the train backend.
The BackPropInitializer layer is called at the beginning of the TrainableFnSequence that first uses the corresponding BackPropTensor.

ONE-DCO-1.0-Signed-off-by: ragmani <[email protected]>
ragmani authored May 8, 2024
1 parent 630a843 commit 29095c7
Showing 2 changed files with 69 additions and 7 deletions.
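
Before the diff, a minimal self-contained sketch of the mechanism the commit message describes may help. All names below (GradBuffer, Sequence, BackwardFn, makeBackPropInitializer) are hypothetical stand-ins, not onert's actual API: the point is only that a gradient buffer must be zero-filled before the first backward function that accumulates into it runs, and that appending the initializer last makes it run first during backwarding. A usage example follows the diff at the end of this page.

#include <algorithm>
#include <functional>
#include <utility>
#include <vector>

// Stand-in for a BackPropTensor's storage (hypothetical).
struct GradBuffer
{
  std::vector<float> data;
};

using BackwardFn = std::function<void()>;

// Stand-in for a TrainableFnSequence (hypothetical).
struct Sequence
{
  std::vector<BackwardFn> fns;
  void append(BackwardFn fn) { fns.push_back(std::move(fn)); }
  // The function appended last is executed first during backwarding.
  void runBackward()
  {
    for (auto it = fns.rbegin(); it != fns.rend(); ++it)
      (*it)();
  }
};

// Analogue of ops::BackPropInitializer: zero-fill every registered buffer
// so that gradient accumulation starts from a clean state.
BackwardFn makeBackPropInitializer(std::vector<GradBuffer *> buffers)
{
  return [buffers = std::move(buffers)]() {
    for (auto *buf : buffers)
      std::fill(buf->data.begin(), buf->data.end(), 0.0f);
  };
}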
73 changes: 66 additions & 7 deletions runtime/onert/backend/train/BackendContext.cc
@@ -18,8 +18,12 @@

#include "TensorBuilder.h"
#include "KernelGenerator.h"
#include "ops/BackPropInitializer.h"

#include <backend/basic/train/TrainableBackendContextHelpers.h>
#include <misc/polymorphic_downcast.h>

#include <cassert>

namespace onert
{
@@ -28,6 +32,49 @@ namespace backend
namespace train
{

namespace
{
void AddBackPropInitializers(const ir::train::TrainableGraph &tgraph, TensorRegistry &tensor_reg,
FunctionMap &fn_map)
{
util::Set<ir::OperandIndex> unvisited;
tgraph.operands().iterate([&](const ir::OperandIndex &index, const ir::Operand &operand) {
// TODO Consider not adding BackPropInitializer if the corresponding BackPropTensors don't
// require initialization (e.g. BackPropTensors that are not back-propagated)
if (!tgraph.getInputs().contains(index) && !operand.isConstant())
unvisited.add(index);
});

for (const auto &op_index : tgraph.btopolSortOperations())
{
assert(fn_map.find(op_index) != fn_map.end());

auto &tn_seq = fn_map.at(op_index);

// The function appended last is executed first in a sequence during backwarding.
std::vector<BackPropTensor *> back_props;
const auto &op = tgraph.operations().at(op_index);
for (const auto &back_prop_index :
op.getInputs() | ir::Remove::UNDEFINED | ir::Remove::DUPLICATED)
{
if (unvisited.contains(back_prop_index))
{
auto back_prop_tensor = tensor_reg.getBackPropTensor(back_prop_index);
assert(back_prop_tensor != nullptr);
back_props.emplace_back(back_prop_tensor);
unvisited.remove(back_prop_index);
}
}

if (back_props.size() != 0)
{
auto initializer = std::make_unique<ops::BackPropInitializer>(back_props);
tn_seq->append(std::move(initializer));
}
}
}
} // namespace

backend::ITensorRegistry *BackendContext::genTensors()
{
return basic::train::genTensors(*this, _tensor_builder);
@@ -63,13 +110,7 @@ backend::train::ITensorRegistry *BackendContext::genTrainingTensors()

FunctionMap BackendContext::genKernels()
{
train::FunctionMap ret;

for (const auto &op_ind : _tdata->op_order)
{
auto fn_seq = kernel_gen->generate(op_ind);
ret.emplace(op_ind, std::move(fn_seq));
}
auto ret = generateFunctionMap();

// Initialize TrainableTensors
trainable_graph()->operands().iterate(
@@ -107,6 +148,24 @@ FunctionMap BackendContext::genKernels()
return ret;
}

FunctionMap BackendContext::generateFunctionMap()
{
train::FunctionMap ret;

for (const auto &op_ind : _tdata->op_order)
{
auto fn_seq = kernel_gen->generate(op_ind);
ret.emplace(op_ind, std::move(fn_seq));
}

// NOTE Each BackPropInitializer should be called first in each op node during backwarding
const auto &tgraph = *_tdata->tgraph;
auto tensor_reg = nnfw::misc::polymorphic_downcast<TensorRegistry *>(_tensor_registry.get());
AddBackPropInitializers(tgraph, *tensor_reg, ret);

return ret;
}

} // namespace train
} // namespace backend
} // namespace onert
3 changes: 3 additions & 0 deletions runtime/onert/backend/train/BackendContext.h
@@ -78,6 +78,9 @@ class BackendContext : public onert::backend::train::TrainableBackendContext

const exec::train::optimizer::Optimizer *optimizer() const { return _optimizer.get(); }

private:
FunctionMap generateFunctionMap();

public:
// TODO Make it private
std::shared_ptr<KernelGenerator> kernel_gen;
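
As a closing note, a hypothetical driver for the sketch shown before the diff, illustrating why the append order matters (all names come from that sketch, not from onert):

int main()
{
  GradBuffer grad{{1.f, 2.f, 3.f}}; // stale values left over from a previous step
  Sequence seq;
  seq.append([&grad] { grad.data[0] += 1.f; });  // op's backward body (illustrative)
  seq.append(makeBackPropInitializer({&grad}));  // appended last, so it runs first
  seq.runBackward(); // grad is zeroed first, then accumulated: {1, 0, 0}
  return 0;
}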
