Fix some x86 build warnings in training code (#20451)
### Description
Fix miscellaneous build warnings from the x86 Windows build: signed/unsigned mismatches in training test assertions and implicit int64_t-to-size_t conversions in module.cc.
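Most of the changes simply wrap the expected value of a test assertion in a `size_t` braced-init so that both sides of the comparison are unsigned. A minimal sketch of the pattern, for illustration only (the test name below is made up; the real fixes are in the training unit tests in the diff):

```cpp
#include <cstddef>
#include <vector>

#include "gtest/gtest.h"

// Hypothetical test for illustration. On a 32-bit (x86) MSVC build,
// comparing a size_t expression against a plain int literal inside
// ASSERT_EQ produces signed/unsigned mismatch warnings such as C4389.
// Writing the expected value as size_t{N} makes both operands unsigned,
// so the comparison is warning-free without a cast.
TEST(BuildWarningExample, CompareSizeAgainstLiteral) {
  std::vector<int> params(7);

  // Warns on x86 MSVC:  ASSERT_EQ(params.size(), 7);
  // Warning-free:
  ASSERT_EQ(params.size(), size_t{7});
}
```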


### Motivation and Context
<!-- - Why is this change required? What problem does it solve?
- If it fixes an open issue, please link to the issue here. -->
  • Loading branch information
skottmckay authored Apr 26, 2024
1 parent aa27dad commit b842eff
Showing 4 changed files with 18 additions and 18 deletions.
@@ -117,8 +117,8 @@ TEST(CheckpointApiTest, SaveOnnxModelAsCheckpoint_ThenLoad_CPU) {

// Check loaded parameter's values are same with original ones.
ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), restored_trainable_param_names.size());
- ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), 7);
- ASSERT_EQ(restored_param_name_to_ort_values.size(), 9);
+ ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), size_t{7});
+ ASSERT_EQ(restored_param_name_to_ort_values.size(), size_t{9});

std::sort(expected_trainable_param_names.begin(), expected_trainable_param_names.end());
std::sort(restored_trainable_param_names.begin(), restored_trainable_param_names.end());
@@ -225,8 +225,8 @@ TEST(CheckpointApiTest, SaveOnnxModelAsCheckpointThenLoadFromBufferCPU) {

// Check loaded parameter's values are same with original ones.
ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), restored_trainable_param_names.size());
- ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), 7);
- ASSERT_EQ(restored_param_name_to_ort_values.size(), 9);
+ ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), size_t{7});
+ ASSERT_EQ(restored_param_name_to_ort_values.size(), size_t{9});

std::sort(expected_trainable_param_names.begin(), expected_trainable_param_names.end());
std::sort(restored_trainable_param_names.begin(), restored_trainable_param_names.end());
@@ -308,7 +308,7 @@ TEST(CheckpointApiTest, SaveOptimizerStateAsCheckpoint_ThenLoad) {

std::vector<Ort::Value> all_weights_values;
data_loader.GetNextSampleBatch(all_weights_values);
- ASSERT_EQ(all_weights_values.size(), 4);
+ ASSERT_EQ(all_weights_values.size(), size_t{4});
NameMLValMap name_to_ort_value{
{"fc1.weight", *all_weights_values[0]},
{"fc1.bias", *all_weights_values[1]},
@@ -360,7 +360,7 @@ TEST(CheckpointApiTest, SaveOptimizerStateAsCheckpoint_ThenLoad) {
InlinedHashMap<std::string, std::shared_ptr<GroupOptimizerState>>&
group_optimizer_states = optimizer_state.group_named_optimizer_states;

- ASSERT_EQ(group_optimizer_states.size(), 1);
+ ASSERT_EQ(group_optimizer_states.size(), size_t{1});
ASSERT_EQ(group_optimizer_states.begin()->first, "group0");

InlinedHashMap<std::string, ParameterOptimizerState>&
@@ -429,7 +429,7 @@ TEST(CheckpointApiTest, SaveCustomPropertyAsCheckpoint_ThenLoad_CPU) {
CheckpointState checkpoint_state_to_load;
ASSERT_STATUS_OK(LoadCheckpoint(checkpoint_path, checkpoint_state_to_load));
PropertyBag& restored_property_bag = checkpoint_state_to_load.property_bag;
- ASSERT_EQ(restored_property_bag.size(), 3);
+ ASSERT_EQ(restored_property_bag.size(), size_t{3});
float restored_f_data = restored_property_bag.GetProperty<float>(f_property_name);
ASSERT_FLOAT_EQ(f_data, restored_f_data);
int64_t restored_i_data = restored_property_bag.GetProperty<int64_t>(i_property_name);
@@ -559,8 +559,8 @@ TEST(CheckpointApiTest, SaveOnnxModelAsCheckpoint_ThenLoad_WithExternalData) {

// Check loaded parameter's values are same with original ones.
ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), restored_trainable_param_names.size());
- ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), 7);
- ASSERT_EQ(restored_param_name_to_ort_values.size(), 9);
+ ASSERT_EQ(expected_trainable_param_name_to_ort_value.size(), size_t{7});
+ ASSERT_EQ(restored_param_name_to_ort_values.size(), size_t{9});

std::sort(expected_trainable_param_names.begin(), expected_trainable_param_names.end());
std::sort(restored_trainable_param_names.begin(), restored_trainable_param_names.end());
@@ -208,8 +208,8 @@ TEST(TrainingApiTest, ModuleParametersSize) {
}

// ((500*784) + 500 + (10*500) + 10) = 397510
- ASSERT_EQ(params_size, 397510);
- ASSERT_EQ(model->GetParametersSize(), 397510);
+ ASSERT_EQ(params_size, size_t{397510});
+ ASSERT_EQ(model->GetParametersSize(), size_t{397510});
}

TEST(TrainingApiTest, ModuleCopyBufferToParameters) {
@@ -269,7 +269,7 @@ TEST(TrainingApiTest, ModuleTrainStep) {
auto model = std::make_unique<onnxruntime::training::api::Module>(model_identifier,
&state, session_option,
*env, std::vector<std::shared_ptr<IExecutionProvider>>());
- ASSERT_EQ(model->GetTrainingModelOutputCount(), 1);
+ ASSERT_EQ(model->GetTrainingModelOutputCount(), size_t{1});
OrtValue input, target;
GenerateRandomInput(std::array<int64_t, 2>{2, 784}, input);
target = onnxruntime::test::CreateInputOrtValueOnCPU<int32_t>(
@@ -659,7 +659,7 @@ TEST(TrainingApiTest, ModuleAndOptimizerWithNominalState) {
ASSERT_STATUS_OK(model_with_complete_state->TrainStep(inputs, complete_fetches));
ASSERT_STATUS_OK(model_with_nominal_state->TrainStep(inputs, nominal_fetches));

- ASSERT_GT(complete_fetches.size(), 0);
+ ASSERT_GT(complete_fetches.size(), size_t{0});
for (size_t i = 0; i < complete_fetches.size(); ++i) {
ASSERT_TRUE(complete_fetches[i].IsTensor());
ASSERT_TRUE(nominal_fetches[i].IsTensor());
@@ -730,7 +730,7 @@ TEST(TrainingApiTest, ModuleAndOptimizerWithNominalState) {
ASSERT_STATUS_OK(model_with_complete_state->EvalStep(inputs, complete_eval_fetches));
ASSERT_STATUS_OK(model_with_nominal_state->EvalStep(inputs, nominal_eval_fetches));

- ASSERT_GT(complete_eval_fetches.size(), 0);
+ ASSERT_GT(complete_eval_fetches.size(), size_t{0});
for (size_t i = 0; i < complete_eval_fetches.size(); ++i) {
ASSERT_TRUE(complete_eval_fetches[i].IsTensor());
ASSERT_TRUE(nominal_eval_fetches[i].IsTensor());
@@ -78,7 +78,7 @@ void RunDropoutTest(const bool use_mask, const std::vector<int64_t>& input_shape
}

auto output_verifier = [&](const std::vector<OrtValue>& fetches, const std::string& provider_type) {
- ASSERT_GE(fetches.size(), 1);
+ ASSERT_GE(fetches.size(), size_t{1});
const auto& output_tensor = fetches[0].Get<Tensor>();
auto output_span = output_tensor.DataAsSpan<float>();

@@ -99,7 +99,7 @@ void RunDropoutTest(const bool use_mask, const std::vector<int64_t>& input_shape
}

if (use_mask) {
- ASSERT_GE(fetches.size(), 2);
+ ASSERT_GE(fetches.size(), size_t{2});
const auto& mask_tensor = fetches[1].Get<Tensor>();
auto mask_span = mask_tensor.DataAsSpan<bool>();
ASSERT_EQ(mask_span.size(), output_span.size()) << "provider: " << provider_type;
4 changes: 2 additions & 2 deletions orttraining/orttraining/training_api/module.cc
@@ -517,7 +517,7 @@ Status Module::CopyParametersToBuffer(OrtValue& parameters_buffer, const bool tr
"Only float is supported.");
}
ORT_RETURN_IF_ERROR(sess_data_transfer_manager.CopyTensor(*weight_tensor, *p_tensor.get()));
- offset += shape.Size();
+ offset += narrow<size_t>(shape.Size());
}
return Status::OK();
}
@@ -601,7 +601,7 @@ Status Module::CopyBufferToParameters(OrtValue& parameters_buffer, const bool tr
ORT_THROW_IF_ERROR(sess_data_transfer_manager.CopyTensor(*src_tensor.get(), *weight_tensor));
}

- offset += shape.Size();
+ offset += narrow<size_t>(shape.Size());
}

if (state_->module_checkpoint_state.is_nominal_state) {
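The module.cc hunks above replace a raw `offset += shape.Size()` with a checked narrowing conversion, since `shape.Size()` returns a signed 64-bit value while `offset` is a `size_t`. Below is a minimal sketch of the idea, assuming a gsl::narrow-style helper; the `checked_narrow` function and standalone `main` are illustrative stand-ins, not ONNX Runtime's actual `narrow` implementation:

```cpp
#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <type_traits>

// Illustrative stand-in for the narrow<size_t>() call in the diff. Like
// gsl::narrow, it performs the cast and then verifies nothing was lost,
// instead of relying on a silent (and, on x86, warning-generating)
// implicit int64_t -> size_t conversion.
template <typename To, typename From>
To checked_narrow(From value) {
  if constexpr (std::is_signed_v<From> && std::is_unsigned_v<To>) {
    // A negative value cannot be represented in an unsigned target type.
    if (value < From{0}) {
      throw std::runtime_error("narrowing a negative value to an unsigned type");
    }
  }
  const To result = static_cast<To>(value);
  // Round-trip check catches truncation (e.g. int64_t -> 32-bit size_t on x86).
  if (static_cast<From>(result) != value) {
    throw std::runtime_error("narrowing conversion lost information");
  }
  return result;
}

int main() {
  size_t offset = 0;
  const int64_t shape_size = 397510;  // e.g. the element count of the test model's parameters

  // Warns on x86 (int64_t -> size_t):  offset += shape_size;
  // Explicit, checked conversion:
  offset += checked_narrow<size_t>(shape_size);
  return offset == size_t{397510} ? 0 : 1;
}
```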
