Skip to content

Commit

Permalink
Lint
Browse files Browse the repository at this point in the history
  • Loading branch information
RyanUnderhill committed Feb 27, 2024
1 parent 1269a65 commit 16f59e1
Show file tree
Hide file tree
Showing 4 changed files with 6 additions and 8 deletions.
4 changes: 2 additions & 2 deletions src/config.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
#include "json.h"
#include <fstream>
#include <sstream>
#include <iostream> // std::cout warnings
#include <iostream> // std::cout warnings

namespace Generators {

Expand Down Expand Up @@ -131,7 +131,7 @@ struct Model_Element : JSON::Element {
if (name == "type") {
v_.type = value;
} else if (name == "logits_type") {
std::cout << "genai-config.json warning: logits_type is deprecated" << std::endl; // TODO: Remove once removed from model builder
std::cout << "genai-config.json warning: logits_type is deprecated" << std::endl; // TODO: Remove once removed from model builder
} else if (name == "kv_type") {
std::cout << "genai-config.json warning: kv_type is deprecated" << std::endl; // TODO: Remove once removed from model builder
} else
Expand Down
4 changes: 2 additions & 2 deletions src/models/input_ids.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ InputIDs::InputIDs(const Model& model, State& state)
type_ = model_.session_info_->GetInputDataType(name_);

// If 64-bit, convert from 32-bit to 64-bit
if(type_==Ort::TypeToTensorType<int64_t>::type) {
if (type_ == Ort::TypeToTensorType<int64_t>::type) {
value_ = OrtValue::CreateTensor(model.allocator_cpu_, shape_, type_);
auto* p_data = value_->GetTensorMutableData<int64_t>();
for (auto v : state_.search_params_.input_ids) {
Expand Down Expand Up @@ -45,7 +45,7 @@ void InputIDs::Update(RoamingArray<int32_t> next_tokens_unk) {
}

// Update input_ids with next tokens, converting from 32-bit to 64-bit
if(type_ == Ort::TypeToTensorType<int64_t>::type) {
if (type_ == Ort::TypeToTensorType<int64_t>::type) {
auto* data = value_->GetTensorMutableData<int64_t>();
#if USE_CUDA
if (model_.device_type_ == DeviceType::CUDA) {
Expand Down
1 change: 0 additions & 1 deletion src/models/logits.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ Logits::Logits(const Model& model, State& state)
state_{state},
shape_{state_.search_params_.batch_size * state_.search_params_.num_beams, state_.search_params_.sequence_length, state_.search_params_.vocab_size},
type_{model_.session_info_->GetOutputDataType(model_.config_->model.decoder.outputs.logits)} {

value_ = OrtValue::CreateTensor(*model.allocator_device_, shape_, type_);

if (model_.device_type_ == DeviceType::CPU && type_ != Ort::TypeToTensorType<float>::type)
Expand Down
5 changes: 2 additions & 3 deletions src/models/model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,6 @@ Ort::Allocator* GetCudaAllocator(OrtSession& session) {
#endif

SessionInfo::SessionInfo(OrtSession& session) {

auto input_names = session.GetInputNames();
std::vector<ONNXTensorElementDataType> input_types(input_names.size());
for (size_t i = 0; i < input_types.size(); i++) {
Expand All @@ -156,11 +155,11 @@ SessionInfo::SessionInfo(OrtSession& session) {
}

// Returns the ONNX tensor element type recorded for the named session input
// (populated from session.GetInputNames() in the SessionInfo constructor).
// NOTE(review): inputs_.find(name)->second dereferences the end() iterator if
// `name` is not a known input (undefined behavior) — consider inputs_.at(name)
// or an assert; confirm all callers pass validated input names.
ONNXTensorElementDataType SessionInfo::GetInputDataType(const std::string& name) const {
  return inputs_.find(name)->second;
}

// Returns the ONNX tensor element type recorded for the named session output.
// NOTE(review): like GetInputDataType, an unknown `name` dereferences end()
// (undefined behavior) — consider outputs_.at(name) or an assert; confirm
// callers only query names reported by the session.
ONNXTensorElementDataType SessionInfo::GetOutputDataType(const std::string& name) const {
  return outputs_.find(name)->second;
}

Model::Model(std::unique_ptr<Config> config, const ProviderOptions* provider_options) : config_{std::move(config)} {
Expand Down

0 comments on commit 16f59e1

Please sign in to comment.