Skip to content

Commit

Permalink
Moving unit testing into SearchParameters test class
Browse files Browse the repository at this point in the history
  • Loading branch information
nv-braf committed May 4, 2024
1 parent 16ac9d8 commit 4383bdc
Show file tree
Hide file tree
Showing 2 changed files with 190 additions and 181 deletions.
181 changes: 0 additions & 181 deletions tests/test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,6 @@
from model_analyzer.config.input.config_command_report import ConfigCommandReport
from model_analyzer.config.input.config_defaults import (
DEFAULT_LLM_INFERENCE_OUTPUT_FIELDS,
DEFAULT_RUN_CONFIG_MAX_CONCURRENCY,
DEFAULT_RUN_CONFIG_MAX_INSTANCE_COUNT,
DEFAULT_RUN_CONFIG_MAX_MODEL_BATCH_SIZE,
DEFAULT_RUN_CONFIG_MIN_CONCURRENCY,
DEFAULT_RUN_CONFIG_MIN_INSTANCE_COUNT,
DEFAULT_RUN_CONFIG_MIN_MODEL_BATCH_SIZE,
)
from model_analyzer.config.input.config_enum import ConfigEnum
from model_analyzer.config.input.config_list_generic import ConfigListGeneric
Expand Down Expand Up @@ -2400,181 +2394,6 @@ def test_model_type_llm(self):
config.inference_output_fields, DEFAULT_LLM_INFERENCE_OUTPUT_FIELDS
)

def test_search_parameter_creation_default(self):
    """
    Verify search-parameter creation for the default optuna profile case.

    With no per-model overrides, batch size and concurrency are searched
    on a log2 (exponential) scale and instance_group on a linear integer
    range, all bounded by the run-config defaults.
    """
    cli_args = [
        "model-analyzer",
        "profile",
        "--model-repository",
        "cli-repository",
        "-f",
        "path-to-config-file",
        "--run-config-search-mode",
        "optuna",
    ]

    yaml_str = """
profile_models: add_sub
"""

    config = self._evaluate_config(cli_args, yaml_str)
    analyzer = Analyzer(config, MagicMock(), MagicMock(), MagicMock())
    analyzer._populate_search_parameters()

    # All assertions target the single profiled model's parameter set.
    add_sub_params = analyzer._search_parameters["add_sub"]

    # batch_sizes: model-scoped, exponential range over the default bounds
    batch_size_param = add_sub_params.get_parameter("batch_sizes")
    self.assertEqual(ParameterUsage.MODEL, batch_size_param.usage)
    self.assertEqual(ParameterCategory.EXPONENTIAL, batch_size_param.category)
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MIN_MODEL_BATCH_SIZE), batch_size_param.min_range
    )
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MAX_MODEL_BATCH_SIZE), batch_size_param.max_range
    )

    # concurrency: runtime-scoped, exponential range over the default bounds
    concurrency_param = add_sub_params.get_parameter("concurrency")
    self.assertEqual(ParameterUsage.RUNTIME, concurrency_param.usage)
    self.assertEqual(ParameterCategory.EXPONENTIAL, concurrency_param.category)
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MIN_CONCURRENCY), concurrency_param.min_range
    )
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MAX_CONCURRENCY), concurrency_param.max_range
    )

    # instance_group: model-scoped, linear integer range over the default bounds
    instance_param = add_sub_params.get_parameter("instance_group")
    self.assertEqual(ParameterUsage.MODEL, instance_param.usage)
    self.assertEqual(ParameterCategory.INTEGER, instance_param.category)
    self.assertEqual(
        DEFAULT_RUN_CONFIG_MIN_INSTANCE_COUNT, instance_param.min_range
    )
    self.assertEqual(
        DEFAULT_RUN_CONFIG_MAX_INSTANCE_COUNT, instance_param.max_range
    )

def test_search_parameter_creation_multi_model_non_default(self):
    """
    Verify search-parameter creation in a multi-model, non-default optuna case.

    ``add_sub`` overrides batch_sizes, instance_group count, and
    max_queue_delay_microseconds, so those become LIST parameters;
    ``mult_div`` overrides only concurrency, so its batch_sizes and
    instance_group fall back to the default exponential/integer ranges.
    """
    cli_args = [
        "model-analyzer",
        "profile",
        "--model-repository",
        "cli-repository",
        "-f",
        "path-to-config-file",
        "--run-config-search-mode",
        "optuna",
    ]

    yaml_content = """
run_config_search_mode: optuna
profile_models:
    add_sub:
        parameters:
            batch_sizes: [16, 32, 64]
        model_config_parameters:
            dynamic_batching:
                max_queue_delay_microseconds: [100, 200, 300]
            instance_group:
                - kind: KIND_GPU
                  count: [1, 2, 3, 4]
    mult_div:
        parameters:
            concurrency: [1, 8, 64, 256]
"""

    config = self._evaluate_config(cli_args, yaml_content)
    analyzer = Analyzer(config, MagicMock(), MagicMock(), MagicMock())
    analyzer._populate_search_parameters()

    # ADD_SUB
    # batch_sizes: user-supplied list overrides the default exponential range
    batch_sizes = analyzer._search_parameters["add_sub"].get_parameter(
        "batch_sizes"
    )
    self.assertEqual(ParameterUsage.MODEL, batch_sizes.usage)
    self.assertEqual(ParameterCategory.LIST, batch_sizes.category)
    self.assertEqual([16, 32, 64], batch_sizes.enumerated_list)

    # concurrency: not overridden for add_sub, so default exponential range
    concurrency = analyzer._search_parameters["add_sub"].get_parameter(
        "concurrency"
    )
    self.assertEqual(ParameterUsage.RUNTIME, concurrency.usage)
    self.assertEqual(ParameterCategory.EXPONENTIAL, concurrency.category)
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MIN_CONCURRENCY), concurrency.min_range
    )
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MAX_CONCURRENCY), concurrency.max_range
    )

    # instance_group: user-supplied count list overrides the default range
    instance_group = analyzer._search_parameters["add_sub"].get_parameter(
        "instance_group"
    )
    self.assertEqual(ParameterUsage.MODEL, instance_group.usage)
    self.assertEqual(ParameterCategory.LIST, instance_group.category)
    self.assertEqual([1, 2, 3, 4], instance_group.enumerated_list)

    # max_queue_delay_microseconds: user-supplied dynamic_batching list
    # (was previously mis-assigned to the ``instance_group`` local, which
    # made a failure here report against the wrong parameter)
    max_queue_delay = analyzer._search_parameters["add_sub"].get_parameter(
        "max_queue_delay_microseconds"
    )
    self.assertEqual(ParameterUsage.MODEL, max_queue_delay.usage)
    self.assertEqual(ParameterCategory.LIST, max_queue_delay.category)
    self.assertEqual([100, 200, 300], max_queue_delay.enumerated_list)

    # MULT_DIV
    # batch_sizes: not overridden, so default exponential range
    batch_sizes = analyzer._search_parameters["mult_div"].get_parameter(
        "batch_sizes"
    )
    self.assertEqual(ParameterUsage.MODEL, batch_sizes.usage)
    self.assertEqual(ParameterCategory.EXPONENTIAL, batch_sizes.category)
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MIN_MODEL_BATCH_SIZE), batch_sizes.min_range
    )
    self.assertEqual(
        log2(DEFAULT_RUN_CONFIG_MAX_MODEL_BATCH_SIZE), batch_sizes.max_range
    )

    # concurrency: user-supplied list overrides the default exponential range
    concurrency = analyzer._search_parameters["mult_div"].get_parameter(
        "concurrency"
    )
    self.assertEqual(ParameterUsage.RUNTIME, concurrency.usage)
    self.assertEqual(ParameterCategory.LIST, concurrency.category)
    self.assertEqual([1, 8, 64, 256], concurrency.enumerated_list)

    # instance_group: not overridden, so default linear integer range
    instance_group = analyzer._search_parameters["mult_div"].get_parameter(
        "instance_group"
    )
    self.assertEqual(ParameterUsage.MODEL, instance_group.usage)
    self.assertEqual(ParameterCategory.INTEGER, instance_group.category)
    self.assertEqual(
        DEFAULT_RUN_CONFIG_MIN_INSTANCE_COUNT, instance_group.min_range
    )
    self.assertEqual(
        DEFAULT_RUN_CONFIG_MAX_INSTANCE_COUNT, instance_group.max_range
    )

def _test_request_rate_config_conflicts(
self, base_args: List[Any], yaml_content: str
) -> None:
Expand Down
Loading

0 comments on commit 4383bdc

Please sign in to comment.