Fix warnings in training code.
skottmckay committed Feb 27, 2024
1 parent c1f88db · commit a6aa359
Showing 4 changed files with 5 additions and 11 deletions.

@@ -311,11 +311,9 @@ template <typename T, typename TOut>
 static std::vector<OrtValue> RunSCELossWithEP(const char* op,
 int opset_version,
 const char* domain,
-std::function<std::unique_ptr<IExecutionProvider>()>
-    ep_creator,
+std::function<std::unique_ptr<IExecutionProvider>()> ep_creator,
 const std::string& reduction,
 const std::int64_t ignore_index,
-const double error_tolerance,
 const std::vector<int64_t>* X_dims,
 const std::vector<int64_t>* index_dims,
 const std::vector<int64_t>* weight_dims,
@@ -403,15 +401,15 @@ static void TestSCELoss(const char* op, int opset_version,
 cpu_fetches = RunSCELossWithEP<float, float>(
 op, opset_version, domain,
 []() -> std::unique_ptr<IExecutionProvider> { return DefaultCpuExecutionProvider(); },
-reduction, ignore_index, error_tolerance,
+reduction, ignore_index,
 X_dims, index_dims, weight_dims,
 Y_dims, log_prob_dims,
 X_data_temp, index_data, weight_data_temp);
 } else {
 cpu_fetches = RunSCELossWithEP<T, float>(
 op, opset_version, domain,
 []() -> std::unique_ptr<IExecutionProvider> { return DefaultCpuExecutionProvider(); },
-reduction, ignore_index, error_tolerance,
+reduction, ignore_index,
 X_dims, index_dims, weight_dims,
 Y_dims, log_prob_dims,
 X_data, index_data, weight_data);
@@ -429,7 +427,7 @@ static void TestSCELoss(const char* op, int opset_version,
 return DefaultRocmExecutionProvider();
 #endif
 },
-reduction, ignore_index, error_tolerance,
+reduction, ignore_index,
 X_dims, index_dims, weight_dims,
 Y_dims, log_prob_dims,
 X_data, index_data, weight_data);
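
The hunks above remove the error_tolerance argument from RunSCELossWithEP and from each call site in TestSCELoss, presumably because it was no longer read inside the helper; an accepted-but-unread parameter is what -Wunused-parameter (GCC/Clang) or C4100 (MSVC) reports. A minimal sketch of that pattern, using illustrative names that are not taken from this test file:

// Before: 'tolerance' is declared but never read, so the compiler warns
// under -Wall -Wextra (or MSVC /W4).
static double ScaleBefore(double value, double tolerance) {
  return value * 2.0;  // 'tolerance' never used
}

// After: the parameter is removed and every caller is updated to match,
// mirroring the change to RunSCELossWithEP above.
static double ScaleAfter(double value) {
  return value * 2.0;
}
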
@@ -53,7 +53,6 @@ Status ConvTransposeGrad<T>::ComputeInputGradient(onnxruntime::Stream* stream, c
 algo_perf.algo, workspace.get(), algo_perf.memory, &zero, args.y_tensor, args.y_data));
 return Status::OK();
 });
-return Status::OK();
 }

 template <typename T>
@@ -71,7 +70,6 @@ Status ConvTransposeGrad<T>::ComputeWeightGradient(onnxruntime::Stream* stream,
 algo_perf.algo, workspace.get(), algo_perf.memory, &zero, args.w_desc, args.dw_data));
 return Status::OK();
 });
-return Status::OK();
 }

 template <typename T>
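
In both ComputeInputGradient and ComputeWeightGradient the deleted return Status::OK(); sat after the closing }); of a statement that, from the visible context, already returns the status produced by the lambda-based call, so the extra return could never execute and would trip an unreachable-code warning (MSVC C4702, or clang's -Wunreachable-code when enabled). A hedged sketch of that shape, with Execute standing in for the real helper the surrounding code calls:

#include <functional>

struct Status {
  static Status OK() { return Status{}; }
};

// Stand-in helper: runs a callback and forwards its status.
static Status Execute(const std::function<Status()>& fn) { return fn(); }

static Status Compute() {
  return Execute([]() -> Status {
    // ... do the work, check its result ...
    return Status::OK();
  });
  // return Status::OK();  // unreachable: the statement above always returns
}
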
@@ -30,8 +30,6 @@
 namespace onnxruntime {
 namespace cuda {

-using namespace onnxruntime::cuda;
-
 namespace {
 // This is the un-specialized struct. Note that we prevent instantiation of this
 // struct by putting an undefined symbol in the function body so it won't compile.
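
The deleted directive imported onnxruntime::cuda into code that is already nested inside namespace onnxruntime { namespace cuda { ... } }, so it added nothing and, depending on compiler and lint settings, gets reported as a redundant using-directive. Roughly (the Kernel declaration below is illustrative, not from this file):

namespace onnxruntime {
namespace cuda {

// Redundant here: everything the directive would bring in is already
// visible, because this code is itself inside onnxruntime::cuda.
// using namespace onnxruntime::cuda;

void Kernel();  // resolves to onnxruntime::cuda::Kernel either way

}  // namespace cuda
}  // namespace onnxruntime
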
@@ -619,7 +619,7 @@ CudaKernel::CudaAsyncBuffer<LambMultiTensorSyncRangeAndLock> compute_tensor_rang

 template <typename TIn1, typename TIn2, typename TOut1, typename TOut2, typename TBuf>
 void LambMultiTensorReductionFunctor<TIn1, TIn2, TOut1, TOut2, TBuf>::operator()(
-cudaStream_t stream,
+cudaStream_t /*stream*/,
 ChunkGroup<4> chunk_group,
 const CudaKernel& kernel,
 void* reduction_buffer,
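
The last change keeps the cudaStream_t parameter in LambMultiTensorReductionFunctor::operator() (so the functor's signature still matches its callers) but comments out the name, the usual way to silence an unused-parameter warning without touching call sites. A small sketch of the same idiom, using a stand-in stream type rather than the real CUDA headers:

using StreamHandle = void*;  // stand-in for cudaStream_t in this sketch

struct ReduceFunctor {
  // The parameter stays so callers do not change, but with its name
  // omitted -Wunused-parameter / MSVC C4100 no longer fires.
  void operator()(StreamHandle /*stream*/, int chunk_count) const {
    for (int i = 0; i < chunk_count; ++i) {
      // ... per-chunk reduction work would go here ...
    }
  }
};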
