Skip to content

Commit

Permalink
renaming
Browse files Browse the repository at this point in the history
  • Loading branch information
wejoncy committed Dec 17, 2024
1 parent 2779e3d commit c7194ad
Show file tree
Hide file tree
Showing 6 changed files with 9 additions and 9 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ static const char* const kCoremlProviderOption_AllowLowPrecisionAccumulationOnGP
// 3. Hash of the input/output names of the model

// The EP won't track model changes and isn't responsible for cache management.
static const char* const kCoremlProviderOption_ModelCacheDirectory = "ModelCachePath";
static const char* const kCoremlProviderOption_ModelCacheDirectory = "ModelCacheDirectory";

// User provided cache-key in metadata_props.
static const char* const kCOREML_CACHE_KEY = "CACHE_KEY";
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/coreml/builders/model_builder.cc
Original file line number Diff line number Diff line change
Expand Up @@ -394,7 +394,7 @@ std::string GetModelOutputPath(const CoreMLOptions& coreml_options,
const GraphViewer& graph_viewer) {
const std::string& subgraph_name = graph_viewer.Name();
std::string path;
if (coreml_options.ModelCachePath().empty()) {
if (coreml_options.ModelCacheDirectory().empty()) {
// path is used to create the ML Package directory for ML Program, and for the model directly otherwise.
path = util::GetTemporaryFilePath();
if (!coreml_options.CreateMLProgram()) {
Expand All @@ -406,7 +406,7 @@ std::string GetModelOutputPath(const CoreMLOptions& coreml_options,
// int metadef_id = metadef_id_generator_.GenerateId(graph_viewer, model_hash);
// MakeString(user_provide_key, "_", COREML, "_", model_hash, "_", metadef_id);
std::string_view cache_key = std::string_view(subgraph_name).substr(0, subgraph_name.find_first_of("_"));
path = MakeString(std::string(coreml_options.ModelCachePath()), "/", cache_key);
path = MakeString(std::string(coreml_options.ModelCacheDirectory()), "/", cache_key);
ORT_THROW_IF_ERROR(Env::Default().CreateFolder(path));
// Write the model path to a file in the cache directory.
// This is for developers to know what the cached model is as we used a hash for the directory name.
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/core/providers/coreml/coreml_options.cc
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ void CoreMLOptions::ValidateAndParseProviderOption(const ProviderOptions& option
} else if (kCoremlProviderOption_AllowLowPrecisionAccumulationOnGPU == option.first) {
allow_low_precision_accumulation_on_gpu_ = option.second == "1";
} else if (kCoremlProviderOption_ModelCacheDirectory == option.first) {
model_cache_path_ = option.second;
model_cache_directory_ = option.second;
}
}
}
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/coreml/coreml_options.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class CoreMLOptions {
bool profile_compute_plan_{false};
bool allow_low_precision_accumulation_on_gpu_{false};
// path to store the converted coreml model
std::string model_cache_path_;
std::string model_cache_directory_;

Check warning on line 21 in onnxruntime/core/providers/coreml/coreml_options.h

View workflow job for this annotation

GitHub Actions / Optional Lint C++

[cpplint] reported by reviewdog 🐶 Add #include <string> for string [build/include_what_you_use] [4] Raw Output: onnxruntime/core/providers/coreml/coreml_options.h:21: Add #include <string> for string [build/include_what_you_use] [4]

public:
explicit CoreMLOptions(uint32_t coreml_flags);
Expand All @@ -34,7 +34,7 @@ class CoreMLOptions {
bool UseStrategy(std::string_view strategy) const { return strategy_ == strategy; }
bool ProfileComputePlan() const { return profile_compute_plan_ && create_mlprogram_; }

std::string_view ModelCachePath() const { return model_cache_path_; }
std::string_view ModelCacheDirectory() const { return model_cache_directory_; }

private:
void ValidateAndParseProviderOption(const ProviderOptions& options);
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/core/providers/coreml/model/model.mm
Original file line number Diff line number Diff line change
Expand Up @@ -420,7 +420,7 @@ Status CompileOrReadCachedModel(NSURL* modelUrl, const CoreMLOptions& coreml_opt
if (compiled_model_url == nil || cached_model_url == nil || compiled_model_path_from_url == nil) {
return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, " compiled_model_url is nil or cached_model_url is nil");
}
if (coreml_options.ModelCachePath().empty()) {
if (coreml_options.ModelCacheDirectory().empty()) {
[compiled_model_path appendString:compiled_model_path_from_url];
return Status::OK();
}
Expand Down Expand Up @@ -479,7 +479,7 @@ Status Predict(const std::unordered_map<std::string, OnnxTensorData>& inputs,

void Execution::cleanup() {
// we keep the compiled model if the user has set a cache path
if (coreml_options_.ModelCachePath().size()) {
if (coreml_options_.ModelCacheDirectory().size()) {
return;
}
NSString* compiled_model_path = [compiled_model_url_ path];
Expand Down
2 changes: 1 addition & 1 deletion onnxruntime/test/perftest/command_args_parser.cc
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ namespace perftest {
"\t [CoreML only] [SpecializationStrategy]:[Default FastPrediction].\n"
"\t [CoreML only] [ProfileComputePlan]:[0 1].\n"
"\t [CoreML only] [AllowLowPrecisionAccumulationOnGPU]:[0 1].\n"
"\t [CoreML only] [ModelCachePath]:[path../a/b/c].\n"
"\t [CoreML only] [ModelCacheDirectory]:[path../a/b/c].\n"
"\t [Example] [For CoreML EP] -e coreml -i \"ModelFormat|MLProgram MLComputeUnits|CPUAndGPU\"\n"
"\n"
"\t [SNPE only] [runtime]: SNPE runtime, options: 'CPU', 'GPU', 'GPU_FLOAT16', 'DSP', 'AIP_FIXED_TF'. \n"
Expand Down

0 comments on commit c7194ad

Please sign in to comment.