[onert] Use structured binding for pair (#13499)
This commit applies C++17 structured bindings to pair accesses, replacing manual .first/.second unpacking.

ONE-DCO-1.0-Signed-off-by: ragmani <[email protected]>
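For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the refactoring this commit applies in every hunk below. The map name and element types are hypothetical stand-ins, not code from the repository; only the before/after loop shapes mirror the diff:

#include <iostream>
#include <map>
#include <string>

int main()
{
  // Hypothetical map standing in for members like _init_map below.
  std::map<int, std::string> init_map = {{0, "PermuteInit"}, {1, "CopyInit"}};

  // Before: name the pair, then unpack .first/.second by hand.
  for (const auto &it : init_map)
  {
    const auto &ind = it.first;
    const auto &fn = it.second;
    std::cout << ind << " -> " << fn << "\n";
  }

  // After: a C++17 structured binding names both members directly.
  for (const auto &[ind, fn] : init_map)
    std::cout << ind << " -> " << fn << "\n";
}

Structured bindings require C++17; with const auto & the binding refers to the pair itself, so ind and fn name its members without copies.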
ragmani authored Jul 26, 2024
1 parent 8d8c943 commit 09b3fff
Showing 33 changed files with 132 additions and 215 deletions.
5 changes: 1 addition & 4 deletions runtime/onert/backend/acl_common/AclConstantInitializer.h
@@ -176,11 +176,8 @@ class AclConstantInitializer : public ir::OperationVisitor
   void run()
   {
     assert(_tensor_reg);
-    for (const auto &it : _init_map)
+    for (const auto &[ind, fn] : _init_map)
     {
-      const auto &ind = it.first;
-      const auto &fn = it.second;
-
       const auto &model_obj = _operands.at(ind);
       auto tensor_obj = _tensor_reg->getNativeITensor(ind);
       assert(tensor_obj != nullptr);
8 changes: 2 additions & 6 deletions runtime/onert/backend/acl_common/AclTensorBuilder.h
@@ -207,9 +207,7 @@ void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::allocate(void)

   for (const auto &entry : lifetime_map)
   {
-    const auto &use = entry.second;
-    const auto &use_type = use.first;
-    const auto &use_index = use.second;
+    const auto &[use_type, use_index] = entry.second;
     assert(use_index.valid());
     if (use_type == UsesType::FIRST)
       _tensor_mgr->startLifetime(use_index);
@@ -242,13 +240,11 @@ void AclTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::buildTensors(void)
   assert(_tensor_mgr->nonconstTensors().size() == 0);

   // Normal tensors
-  for (const auto &entry : _tensor_info_map)
+  for (const auto &[ind, info] : _tensor_info_map)
   {
-    const auto &ind = entry.first;
     if (_parent_map.count(ind) > 0)
       continue;

-    const auto &info = entry.second;
     auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), ir::Layout::UNKNOWN, true);
     _tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), info.isConstant(),
                              _uses_count_map[ind]);
12 changes: 6 additions & 6 deletions runtime/onert/backend/cl_common/src/LifetimeMap.cc
@@ -55,10 +55,10 @@ LifetimeMap createLifetimeMap(LifetimeSeq &lifetime_seq,
   LifetimeMap lifetime_map;
   for (size_t i = 0; i < lifetime_seq.size(); i++)
   {
-    auto &entry = lifetime_seq[i];
-    if (entry.first != UsesType::FIRST)
+    const auto &[entry_uses_type, entry_idx] = lifetime_seq[i];
+    if (entry_uses_type != UsesType::FIRST)
       continue;
-    auto root_ind = find_root(entry.second);
+    auto root_ind = find_root(entry_idx);
     if (first_use_check[root_ind])
       continue;
     first_use_check[root_ind] = true;
@@ -67,10 +67,10 @@

   for (int i = lifetime_seq.size() - 1; i >= 0; i--)
   {
-    auto &entry = lifetime_seq[i];
-    if (entry.first != UsesType::LAST)
+    const auto &[entry_uses_type, entry_idx] = lifetime_seq[i];
+    if (entry_uses_type != UsesType::LAST)
       continue;
-    auto root_ind = find_root(entry.second);
+    auto root_ind = find_root(entry_idx);
     if (last_use_check[root_ind])
       continue;
     last_use_check[root_ind] = true;
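A side note on the two hunks above: structured bindings also work with indexed access into a sequence of pairs, which is why both scans can drop the intermediate entry variable. A minimal sketch, assuming LifetimeSeq is a vector of (UsesType, index) pairs — the element types here are guesses for illustration only:

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

enum class UsesType { FIRST, LAST };

int main()
{
  // Hypothetical stand-in for onert's LifetimeSeq.
  std::vector<std::pair<UsesType, uint32_t>> lifetime_seq = {
    {UsesType::FIRST, 0}, {UsesType::FIRST, 1}, {UsesType::LAST, 0}};

  // Binding directly to lifetime_seq[i] decomposes the pair in place;
  // no named entry plus .first/.second is needed.
  for (std::size_t i = 0; i < lifetime_seq.size(); i++)
  {
    const auto &[uses_type, idx] = lifetime_seq[i];
    if (uses_type != UsesType::FIRST)
      continue;
    std::cout << "first use of tensor " << idx << "\n";
  }
}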
13 changes: 4 additions & 9 deletions runtime/onert/backend/train/MemoryPlanner.cc
@@ -60,10 +60,9 @@ void FirstFitPlanner::claim(const DisposableTensorIndex &ind, size_t size)
 {
   // Find the right position for claiming
   uint32_t next_offset = 0;
-  for (const auto &mem_claim : _claim_table)
+  for (const auto &[claimed_base_offset, claimed_index] : _claim_table)
   {
-    auto claimed_base_offset = mem_claim.first;
-    auto claimed_size = _mem_plans[mem_claim.second].size;
+    auto claimed_size = _mem_plans[claimed_index].size;
     if (next_offset + size <= claimed_base_offset)
     {
       break;
@@ -146,10 +145,8 @@ void WICPlanner::release(const DisposableTensorIndex &ind)
  */
 void WICPlanner::buildMemoryPlans()
 {
-  for (const auto &operand : _indices)
+  for (const auto &[size, ind] : _indices)
   {
-    uint32_t size = operand.first;
-    const DisposableTensorIndex &ind = operand.second;
     VERBOSE(WIC_PLANNER) << "build_plan(" << ind << "): [" << size << "sz]" << std::endl;

     uint32_t next_offset = 0;
@@ -164,10 +161,8 @@
   }

   // Find free memory block in first-fit manner
-  for (const auto &interfered_plan : interfered_plans)
+  for (const auto &[claimed_base_offset, claimed_size] : interfered_plans)
   {
-    auto claimed_base_offset = interfered_plan.first;
-    auto claimed_size = interfered_plan.second;
     VERBOSE(WIC_PLANNER) << "interfere : [+" << claimed_base_offset << ", " << claimed_size
                          << "sz]" << std::endl;
     if (next_offset + size <= claimed_base_offset)
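For context on what these loops compute: FirstFitPlanner scans existing claims ordered by base offset and places a new block in the first gap large enough. Below is a minimal sketch of that scan; the function name first_fit is mine, and it assumes the claim table maps base offsets directly to block sizes (the real code maps offsets to tensor indices and reads sizes from _mem_plans):

#include <cstdint>
#include <iostream>
#include <map>

// First-fit scan: walk claims in offset order and stop at the first
// gap that can hold `size` bytes.
uint32_t first_fit(const std::map<uint32_t, uint32_t> &claim_table, uint32_t size)
{
  uint32_t next_offset = 0;
  for (const auto &[claimed_base_offset, claimed_size] : claim_table)
  {
    // A gap of at least `size` bytes before this claim satisfies the request.
    if (next_offset + size <= claimed_base_offset)
      break;
    // Otherwise jump past the claimed block and keep scanning.
    next_offset = claimed_base_offset + claimed_size;
  }
  return next_offset;
}

int main()
{
  std::map<uint32_t, uint32_t> claims = {{0, 64}, {64, 32}, {128, 16}};
  std::cout << first_fit(claims, 32) << "\n"; // prints 96: the 32-byte gap before offset 128
}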
4 changes: 1 addition & 3 deletions runtime/onert/backend/train/TensorManager.cc
@@ -28,10 +28,8 @@ void allocateMemory(MemoryManager *mgr, const TensorMap &tensors, const std::str
 {
   mgr->allocate();

-  for (auto &&pair : tensors)
+  for (auto &&[index, tensor] : tensors)
   {
-    const auto &index = pair.first;
-    auto tensor = pair.second.get();
     assert(!tensor->is_dynamic());

     auto *buffer = mgr->getBuffer(index);
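One subtlety worth noting in hunks like the one above: the old code bound tensor to the raw pointer pair.second.get(), while the structured binding names the owning std::unique_ptr itself. Member access through -> is unchanged, but passing the pointer on now needs an explicit .get(). A small sketch with hypothetical types (Tensor and use_raw are stand-ins, not onert APIs):

#include <cassert>
#include <map>
#include <memory>

struct Tensor
{
  bool is_dynamic() const { return false; }
};

void use_raw(Tensor *) {}

int main()
{
  std::map<int, std::unique_ptr<Tensor>> tensors;
  tensors.emplace(0, std::make_unique<Tensor>());

  for (auto &&[index, tensor] : tensors)
  {
    // `tensor` is the std::unique_ptr itself, so operator-> still works...
    assert(!tensor->is_dynamic());
    // ...but an API taking a raw Tensor* now needs an explicit .get().
    use_raw(tensor.get());
    (void)index;
  }
}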
12 changes: 3 additions & 9 deletions runtime/onert/backend/train/TensorPlanner.cc
@@ -47,10 +47,8 @@ void TensorPlanner::planNonConstTensors(TensorBuilder *tensor_builder)

   // Prepare scanning
   // This assumes TrainingOperationIndex in forwarding are always used
-  for (const auto &pair : training_usedefs)
+  for (const auto &[operand_index, operand_usedefs] : training_usedefs)
   {
-    const auto &operand_index = pair.first;
-    const auto &operand_usedefs = pair.second;
     const auto &operand = operand_usedefs.operand();

     if (_external_operands.contains(operand_index.index()))
@@ -69,21 +67,17 @@
   // It's neither an external operand or a constant operand
   // What does it mean when def count is 0?
   // A. Not yet found the reason to need it yet.
-  for (const auto &pair : defs_map)
+  for (const auto &[operand_index, def_count] : defs_map)
   {
-    const auto &operand_index = pair.first;
-    const auto def_count = pair.second;
     if (def_count == 0)
       tensor_builder->notifyFirstUse(operand_index.index());
   }

   // This is a workaround to keep the operands over the execution
   // (the operands look like they are unused)
   std::vector<ir::train::TrainingOperandIndex> operands_last_until_end;
-  for (const auto &pair : uses_map)
+  for (const auto &[operand_index, use_count] : uses_map)
   {
-    const auto &operand_index = pair.first;
-    const auto use_count = pair.second;
     if (use_count == 0)
       operands_last_until_end.push_back(operand_index);
   }
@@ -81,21 +81,17 @@ template <typename T_BackendContext> void planTensors(const T_BackendContext &ct
     tensor_builder->notifyFirstUse(ind);
   }

-  for (const auto &pair : def_map)
+  for (const auto &[ind, def_count] : def_map)
   {
-    const auto &ind = pair.first;
-    const auto def_count = pair.second;
     if (def_count == 0)
       tensor_builder->notifyFirstUse(ind);
   }

   // This is a workaround to keep the operands over the execution
   // (the operands look like they are unused)
   std::vector<ir::OperandIndex> operands_last_until_end;
-  for (const auto &pair : uses_map)
+  for (const auto &[ind, use_count] : uses_map)
   {
-    const auto &ind = pair.first;
-    const auto use_count = pair.second;
     if (use_count == 0)
       operands_last_until_end.push_back(ind);
   }
4 changes: 2 additions & 2 deletions runtime/onert/core/include/backend/train/ITensorRegistry.h
@@ -115,8 +115,8 @@ class PortableTensorRegistryTemplate : public backend::train::ITensorRegistry
     const std::function<void(const ir::OperandIndex &, const train::ITrainableTensor *)> &fn)
     const override
   {
-    for (const auto &e : _trainable)
-      fn(e.first, e.second.get());
+    for (const auto &[index, tensor] : _trainable)
+      fn(index, tensor.get());
   }

   IPortableTensor *getPortableTensor(const ir::OperandIndex &index)
4 changes: 2 additions & 2 deletions runtime/onert/core/include/compiler/BackendResolver.h
@@ -44,9 +44,9 @@ class BackendResolver
   void
   iterate(const std::function<void(const ir::OperationIndex &, const backend::Backend &)> &fn) const
   {
-    for (const auto &e : _gen_map)
+    for (const auto &[op_index, backend] : _gen_map)
     {
-      fn(e.first, *e.second);
+      fn(op_index, *backend);
     }
   }
8 changes: 4 additions & 4 deletions runtime/onert/core/include/ir/Model.h
@@ -105,9 +105,9 @@ class Model
    */
   void iterate(const std::function<void(const SubgraphIndex &, const IGraph &)> &fn) const
   {
-    for (const auto &e : _subgraphs)
+    for (const auto &[subg_idx, subg] : _subgraphs)
     {
-      fn(e.first, *e.second);
+      fn(subg_idx, *subg);
     }
   }
@@ -119,9 +119,9 @@
    */
   void iterate(const std::function<void(const SubgraphIndex &, IGraph &)> &fn)
   {
-    for (const auto &e : _subgraphs)
+    for (const auto &[subg_idx, subg] : _subgraphs)
     {
-      fn(e.first, *e.second);
+      fn(subg_idx, *subg);
     }
   }
4 changes: 2 additions & 2 deletions runtime/onert/core/include/util/ObjectManager.h
@@ -185,9 +185,9 @@ template <typename Index, typename Object> class ObjectManager
    */
   void iterate(const std::function<void(const Index &, const Object &)> &fn) const
   {
-    for (const auto &e : _objects)
+    for (const auto &[index, obj] : _objects)
     {
-      fn(e.first, *e.second);
+      fn(index, *obj);
     }
   }
   /**
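The iterate changes in ITensorRegistry.h, BackendResolver.h, Model.h, and ObjectManager.h all follow the same shape: decompose the map entry, then dereference the owned pointer for the callback. A minimal self-contained sketch, assuming the container is an ordered map of unique_ptr values (as these classes appear to use); Manager and its members are illustrative names, not onert code:

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>

template <typename Index, typename Object> class Manager
{
public:
  void set(Index i, Object o) { _objects[i] = std::make_unique<Object>(std::move(o)); }

  // Mirrors the iterate pattern above: the binding decomposes each map
  // entry, and *obj dereferences the unique_ptr so the callback
  // receives a const Object &.
  void iterate(const std::function<void(const Index &, const Object &)> &fn) const
  {
    for (const auto &[index, obj] : _objects)
      fn(index, *obj);
  }

private:
  std::map<Index, std::unique_ptr<Object>> _objects;
};

int main()
{
  Manager<int, std::string> mgr;
  mgr.set(0, "conv2d");
  mgr.set(1, "relu");
  mgr.iterate([](const int &idx, const std::string &name) {
    std::cout << idx << ": " << name << "\n";
  });
}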
13 changes: 4 additions & 9 deletions runtime/onert/core/src/backend/basic/MemoryPlanner.cc
@@ -58,10 +58,9 @@ void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size)
 {
   // Find the right position for claiming
   uint32_t next_offset = 0;
-  for (const auto &mem_claim : _claim_table)
+  for (const auto &[claimed_base_offset, claimed_operand_idx] : _claim_table)
   {
-    auto claimed_base_offset = mem_claim.first;
-    auto claimed_size = _mem_plans[mem_claim.second].size;
+    auto claimed_size = _mem_plans[claimed_operand_idx].size;
     if (next_offset + size <= claimed_base_offset)
     {
       break;
@@ -143,10 +142,8 @@ void WICPlanner::release(const ir::OperandIndex &ind)
  */
 void WICPlanner::buildMemoryPlans()
 {
-  for (const auto &operand : _operands)
+  for (const auto &[size, ind] : _operands)
   {
-    uint32_t size = operand.first;
-    const ir::OperandIndex &ind = operand.second;
     VERBOSE(WIC_PLANNER) << "build_plan(" << ind << "): [" << size << "sz]" << std::endl;

     uint32_t next_offset = 0;
@@ -161,10 +158,8 @@
   }

   // Find free memory block in first-fit manner
-  for (const auto &interfered_plan : interfered_plans)
+  for (const auto &[claimed_base_offset, claimed_size] : interfered_plans)
   {
-    auto claimed_base_offset = interfered_plan.first;
-    auto claimed_size = interfered_plan.second;
     VERBOSE(WIC_PLANNER) << "interfere : [+" << claimed_base_offset << ", " << claimed_size
                          << "sz]" << std::endl;
     if (next_offset + size <= claimed_base_offset)
4 changes: 1 addition & 3 deletions runtime/onert/core/src/backend/basic/StaticTensorManager.cc
@@ -48,10 +48,8 @@ void StaticTensorManager::allocateNonconsts(void)
 {
   _nonconst_mgr->allocate();

-  for (auto &&pair : _tensors->native_tensors())
+  for (auto &&[ind, tensor] : _tensors->native_tensors())
   {
-    const auto &ind = pair.first;
-    auto tensor = pair.second.get();
     if (!_as_constants[ind] && !tensor->is_dynamic())
     {
       auto *buffer = _nonconst_mgr->getBuffer(ind);
8 changes: 2 additions & 6 deletions runtime/onert/core/src/compiler/Compiler.cc
@@ -118,10 +118,8 @@ std::shared_ptr<CompilerArtifact> Compiler::compile(void)

   _model.reset();

-  for (const auto &pair : lowered_subgs)
+  for (const auto &[subg_index, lowered_subg] : lowered_subgs)
   {
-    const auto &subg_index = pair.first;
-    const auto &lowered_subg = pair.second;
     dot_dumper.dump(*lowered_subg, nnfw::misc::str("after_lower_subg-", subg_index.value()));
   }
@@ -159,11 +157,9 @@ std::shared_ptr<CompilerArtifact> Compiler::compile(void)
    * Backend independent analysis & optimization phase finished
    *************************************************************/
   auto executors = std::make_shared<exec::SingleModelExecutors>();
-  for (auto &&pair : lowered_subgs)
+  for (auto &&[subg_index, lowered_subg] : lowered_subgs)
   {
     auto const model_index = ir::ModelIndex{0};
-    auto const subg_index = pair.first;
-    auto &lowered_subg = pair.second;
     auto const indexed_ranks = lowered_subg->indexed_ranks();

     ir::OperationDumper dumper("Executor generation of Subgraph " +
4 changes: 2 additions & 2 deletions runtime/onert/core/src/compiler/CompilerHelpers.h
@@ -41,8 +41,8 @@ createStaticShapeInferers(
     const std::unordered_map<ir::SubgraphIndex, std::unique_ptr<LoweredGraphType>> &lowered_subgs)
 {
   std::unordered_map<ir::SubgraphIndex, ILoweredGraph *> lsubgs;
-  for (auto &&e : lowered_subgs)
-    lsubgs[e.first] = e.second.get();
+  for (auto &&[subg_index, lowered_subg] : lowered_subgs)
+    lsubgs[subg_index] = lowered_subg.get();
   return StaticShapeInferer::createStaticShapeInferers(lsubgs);
 }