Skip to content

Commit

Permalink
Remove unused variables
Browse files Browse the repository at this point in the history
  • Loading branch information
PatriceVignola committed Oct 7, 2023
1 parent 779197d commit b809a5b
Show file tree
Hide file tree
Showing 3 changed files with 3 additions and 50 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ namespace Dml
const onnxruntime::OpKernelInfo& kernelInfo,
std::shared_ptr<const onnxruntime::IndexedSubGraph> indexedSubGraph,
const onnxruntime::Path& modelPath,
std::shared_ptr<std::vector<std::vector<std::string>>> inputDimParams,
std::vector<std::shared_ptr<onnxruntime::Node>>&& subgraphNodes,
std::vector<const onnxruntime::NodeArg*>&& subgraphInputs,
std::vector<const onnxruntime::NodeArg*>&& subgraphOutputs,
Expand All @@ -30,7 +29,6 @@ namespace Dml
: OpKernel(kernelInfo),
m_indexedSubGraph(std::move(indexedSubGraph)),
m_modelPath(modelPath),
m_inputDimParams(std::move(inputDimParams)),
m_subgraphNodes(std::move(subgraphNodes)),
m_subgraphInputs(std::move(subgraphInputs)),
m_subgraphOutputs(std::move(subgraphOutputs)),
Expand Down Expand Up @@ -68,8 +66,6 @@ namespace Dml
std::vector<Microsoft::WRL::ComPtr<ID3D12Resource>>& initializeResourceRefs,
std::vector<DML_BUFFER_BINDING> initInputBindings) const
{
std::optional<DML_BUFFER_BINDING> persistentResourceBinding;

// Allocate a persistent resource and initialize the operator
UINT64 persistentResourceSize = m_compiledExecutionPlanOperator->GetBindingProperties().PersistentResourceSize;
if (persistentResourceSize > 0)
Expand All @@ -80,12 +76,12 @@ namespace Dml
m_persistentResource.GetAddressOf(),
m_persistentResourceAllocatorUnk.GetAddressOf()));

persistentResourceBinding = DML_BUFFER_BINDING { m_persistentResource.Get(), 0, persistentResourceSize };
m_persistentResourceBinding = DML_BUFFER_BINDING { m_persistentResource.Get(), 0, persistentResourceSize };
}

ORT_THROW_IF_FAILED(m_provider->InitializeOperator(
m_compiledExecutionPlanOperator.Get(),
persistentResourceBinding ? &*persistentResourceBinding : nullptr,
m_persistentResourceBinding ? &*m_persistentResourceBinding : nullptr,
gsl::make_span(initInputBindings)));

// Queue references to objects which must be kept alive until resulting GPU work completes
Expand Down Expand Up @@ -303,17 +299,10 @@ namespace Dml
ComPtr<IWinmlExecutionProvider> m_winmlProvider;
ComPtr<Dml::IExecutionProvider> m_provider;

// Re-usable command list, supporting descriptor heap, and DML binding table to update that heap.
ComPtr<ID3D12GraphicsCommandList> m_graphicsCommandList;
ComPtr<ID3D12CommandAllocator> m_commandAllocator;
ComPtr<ID3D12DescriptorHeap> m_heap;
ComPtr<IDMLBindingTable> m_bindingTable;
std::optional<DML_BUFFER_BINDING> m_persistentResourceBinding;
mutable std::optional<DML_BUFFER_BINDING> m_persistentResourceBinding;
std::shared_ptr<const onnxruntime::IndexedSubGraph> m_indexedSubGraph;
const onnxruntime::Path& m_modelPath;

// TODO (pavignol): Remove m_inputDimParams if truly not needed
std::shared_ptr<std::vector<std::vector<std::string>>> m_inputDimParams;
std::vector<std::shared_ptr<onnxruntime::Node>> m_subgraphNodes;
std::vector<const onnxruntime::NodeArg*> m_subgraphInputs;
std::vector<const onnxruntime::NodeArg*> m_subgraphOutputs;
Expand All @@ -326,26 +315,17 @@ namespace Dml
// Bindings from previous executions of a re-used command list
mutable std::vector<std::unique_ptr<ONNX_NAMESPACE::TensorProto>> m_ownedCpuInputs;
mutable ComPtr<IDMLCompiledOperator> m_compiledExecutionPlanOperator;
mutable std::vector<uint64_t> m_inputBindingAllocIds;
mutable std::vector<uint64_t> m_outputBindingAllocIds;
mutable uint64_t m_tempBindingAllocId = 0;
mutable std::vector<bool> m_inputsUsed;
mutable ComPtr<ID3D12Resource> m_persistentResource;
mutable ComPtr<IUnknown> m_persistentResourceAllocatorUnk; // Controls when the persistent resource is returned to the allocator
mutable Windows::AI::MachineLearning::Adapter::EdgeShapes m_outputShapes;
mutable std::unordered_map<std::string, onnxruntime::TensorShape> m_inferredInputShapes;

// Fence tracking the status of the command list's last execution, and whether its descriptor heap
// can safely be updated.
mutable ComPtr<ID3D12Fence> m_fence;
mutable uint64_t m_completionValue = 0;
};

onnxruntime::OpKernel* CreateRuntimeFusedGraphKernel(
const onnxruntime::OpKernelInfo& info,
std::shared_ptr<const onnxruntime::IndexedSubGraph> indexedSubGraph,
const onnxruntime::Path& modelPath,
std::shared_ptr<std::vector<std::vector<std::string>>> inputDimParams,
std::vector<std::shared_ptr<onnxruntime::Node>>&& subgraphNodes,
std::vector<const onnxruntime::NodeArg*>&& subgraphInputs,
std::vector<const onnxruntime::NodeArg*>&& subgraphOutputs,
Expand All @@ -357,7 +337,6 @@ namespace Dml
info,
std::move(indexedSubGraph),
modelPath,
std::move(inputDimParams),
std::move(subgraphNodes),
std::move(subgraphInputs),
std::move(subgraphOutputs),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ namespace Dml
const onnxruntime::OpKernelInfo& info,
std::shared_ptr<const onnxruntime::IndexedSubGraph> indexedSubGraph,
const onnxruntime::Path& modelPath,
std::shared_ptr<std::vector<std::vector<std::string>>> inputDimParams,
std::vector<std::shared_ptr<onnxruntime::Node>>&& subgraphNodes,
std::vector<const onnxruntime::NodeArg*>&& subgraphInputs,
std::vector<const onnxruntime::NodeArg*>&& subgraphOutputs,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -369,9 +369,6 @@ namespace DmlRuntimeGraphFusionHelper
subgraphOutputs.push_back(graph.GetNodeArg(graphOutputName));
}

// We store the input dim params that haven't been overridden yet so that we can map their value at runtime once the real inputs are provided
auto inputDimParams = std::make_shared<std::vector<std::vector<std::string>>>();

// We need to keep the initializers alive since they will be freed once the nodes are removed from the graph
std::vector<ONNX_NAMESPACE::TensorProto> ownedInitializers;
ownedInitializers.reserve(isInitializerTransferable.size());
Expand All @@ -393,7 +390,6 @@ namespace DmlRuntimeGraphFusionHelper

// lambda captures for the kernel registration
auto fused_kernel_func = [
inputDimParams,
indexedSubGraph,
&modelPath,
nodesInfo = std::move(nodesInfo),
Expand Down Expand Up @@ -422,7 +418,6 @@ namespace DmlRuntimeGraphFusionHelper
info,
indexedSubGraph,
modelPath,
std::move(inputDimParams),
std::move(subgraphNodes),
std::move(subgraphInputs),
std::move(subgraphOutputs),
Expand Down Expand Up @@ -453,26 +448,6 @@ namespace DmlRuntimeGraphFusionHelper
auto& fusedNode = graph.BeginFuseSubGraph(*indexedSubGraph, indexedSubGraph->GetMetaDef()->name);
fusedNode.SetExecutionProviderType(onnxruntime::kDmlExecutionProvider);

inputDimParams->resize(fusedNode.InputDefs().size());

for (int inputIndex = 0; inputIndex < fusedNode.InputDefs().size(); ++inputIndex)
{
const onnxruntime::NodeArg* inputDef = fusedNode.InputDefs()[inputIndex];

ORT_THROW_HR_IF(E_INVALIDARG, !inputDef->TypeAsProto()->has_tensor_type());
const auto& tensorShape = inputDef->TypeAsProto()->tensor_type().shape();

(*inputDimParams)[inputIndex].resize(tensorShape.dim_size());

for (int i = 0; i < tensorShape.dim_size(); ++i)
{
if (tensorShape.dim(i).has_dim_param())
{
(*inputDimParams)[inputIndex][i] = tensorShape.dim(i).dim_param();
}
}
}

graph.FinalizeFuseSubGraph(*indexedSubGraph, fusedNode);
}
}
Expand Down

0 comments on commit b809a5b

Please sign in to comment.