Commit

Added L0 quick search test (#521)
* Added L0 quick search test

* Fixing copyrights
nv-braf authored and mc-nv committed Sep 12, 2022
1 parent 25b27d6 commit c157d15
Showing 3 changed files with 244 additions and 0 deletions.
92 changes: 92 additions & 0 deletions qa/L0_quick_search/check_results.py
@@ -0,0 +1,92 @@
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import defaultdict
import argparse
import sys
import yaml
import os


class TestOutputValidator:
"""
Functions that validate the output
of the test
"""

def __init__(self, config, test_name, analyzer_log):
self._config = config
self._models = config['profile_models']
self._analyzer_log = analyzer_log

check_function = self.__getattribute__(f'check_{test_name}')

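        # Run the requested check and exit 0 on success / 1 on failure so that
        # the calling shell script can key off the process exit code.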
if check_function():
sys.exit(0)
else:
sys.exit(1)

def check_profile_logs(self):
"""
        Check that each model was profiled a number of times that falls
        within the expected measurement range for a quick search run
"""

with open(self._analyzer_log, 'r') as f:
log_contents = f.read()

expected_min_num_measurements = 20
expected_max_num_measurements = 80
for model in self._models:
token = f"Profiling {model}_config"
            token_idx = -1  # start at -1 so a token match at index 0 is not skipped
found_count = 0
while True:
token_idx = log_contents.find(token, token_idx + 1)
if token_idx == -1:
break
found_count += 1
if found_count < expected_min_num_measurements or found_count > expected_max_num_measurements:
print(
f"\n***\n*** Expected range of measurements for {model} : {expected_min_num_measurements} to {expected_max_num_measurements}. "
f"Found {found_count}. \n***")
return False
return True


if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f',
'--config-file',
type=str,
required=True,
help='The path to the config yaml file.')
parser.add_argument('-l',
'--analyzer-log-file',
type=str,
required=True,
help='The full path to the analyzer log.')
parser.add_argument('-t',
'--test-name',
type=str,
required=True,
help='The name of the test to be run.')
args = parser.parse_args()

with open(args.config_file, 'r') as f:
config = yaml.safe_load(f)

TestOutputValidator(config, args.test_name, args.analyzer_log_file)
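
For context, a minimal sketch of what check_profile_logs counts, using a synthetic log excerpt. The log wording below is an assumption -- only the "Profiling <model>_config" token comes from the test itself -- and str.count stands in for the while/find loop above.

# Hypothetical illustration only -- not part of this commit.
log_contents = (
    "Profiling resnet50_libtorch_config_default: concurrency 1\n"
    "Profiling resnet50_libtorch_config_0: concurrency 1\n"
    "Profiling resnet50_libtorch_config_0: concurrency 2\n"
)
# Each occurrence of the token is one measurement; check_profile_logs expects
# between 20 and 80 of them per model for a quick search run.
found_count = log_contents.count("Profiling resnet50_libtorch_config")
print(found_count)  # -> 3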
93 changes: 93 additions & 0 deletions qa/L0_quick_search/test.sh
@@ -0,0 +1,93 @@
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ANALYZER_LOG="test.log"
source ../common/util.sh
source ../common/check_analyzer_results.sh

rm -f *.log
rm -rf results && mkdir -p results

# Set test parameters
MODEL_ANALYZER="`which model-analyzer`"
REPO_VERSION=${NVIDIA_TRITON_SERVER_VERSION}
MODEL_REPOSITORY=${MODEL_REPOSITORY:="/mnt/nvdl/datasets/inferenceserver/$REPO_VERSION/libtorch_model_store"}
QA_MODELS="resnet50_libtorch"
MODEL_NAMES="$(echo $QA_MODELS | sed 's/ /,/g')"
TRITON_LAUNCH_MODE=${TRITON_LAUNCH_MODE:="local"}
CLIENT_PROTOCOL="grpc"
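# find_available_ports, get_all_gpus_uuids and get_output_directory are
# presumably provided by the sourced ../common/util.sh.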
PORTS=(`find_available_ports 3`)
GPUS=(`get_all_gpus_uuids`)
OUTPUT_MODEL_REPOSITORY=${OUTPUT_MODEL_REPOSITORY:=`get_output_directory`}
CONFIG_FILE="config.yml"
EXPORT_PATH="`pwd`/results"
FILENAME_SERVER_ONLY="server-metrics.csv"
FILENAME_INFERENCE_MODEL="model-metrics-inference.csv"
FILENAME_GPU_MODEL="model-metrics-gpu.csv"

rm -rf $OUTPUT_MODEL_REPOSITORY

python3 test_config_generator.py --profile-models $MODEL_NAMES

# Run the analyzer and check the results
RET=0

set +e

MODEL_ANALYZER_ARGS="-m $MODEL_REPOSITORY -f $CONFIG_FILE"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --client-protocol=$CLIENT_PROTOCOL --triton-launch-mode=$TRITON_LAUNCH_MODE --run-config-search-mode=quick"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --triton-http-endpoint localhost:${PORTS[0]} --triton-grpc-endpoint localhost:${PORTS[1]}"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --triton-metrics-url http://localhost:${PORTS[2]}/metrics"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --output-model-repository-path $OUTPUT_MODEL_REPOSITORY --override-output-model-repository"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS -e $EXPORT_PATH --filename-server-only=$FILENAME_SERVER_ONLY"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --filename-model-inference=$FILENAME_INFERENCE_MODEL --filename-model-gpu=$FILENAME_GPU_MODEL"
MODEL_ANALYZER_ARGS="$MODEL_ANALYZER_ARGS --skip-summary-reports"
MODEL_ANALYZER_SUBCOMMAND="profile"
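# run_analyzer is presumably defined in the sourced ../common/util.sh and is
# assumed to read MODEL_ANALYZER_ARGS and MODEL_ANALYZER_SUBCOMMAND above.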
run_analyzer
if [ $? -ne 0 ]; then
echo -e "\n***\n*** Test Failed. model-analyzer $MODEL_ANALYZER_SUBCOMMAND exited with non-zero exit code. \n***"
cat $ANALYZER_LOG
RET=1
else
# Check the Analyzer log for correct output
TEST_NAME='profile_logs'
python3 check_results.py -f $CONFIG_FILE -t $TEST_NAME -l $ANALYZER_LOG
if [ $? -ne 0 ]; then
echo -e "\n***\n*** Test Output Verification Failed for $TEST_NAME test.\n***"
cat $ANALYZER_LOG
RET=1
fi

SERVER_METRICS_FILE=${EXPORT_PATH}/results/${FILENAME_SERVER_ONLY}
MODEL_METRICS_GPU_FILE=${EXPORT_PATH}/results/${FILENAME_GPU_MODEL}
MODEL_METRICS_INFERENCE_FILE=${EXPORT_PATH}/results/${FILENAME_INFERENCE_MODEL}

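    # --skip-summary-reports is passed above, so no summary CSVs should be
    # written to the export path; check_no_csv_exists presumably comes from
    # the sourced ../common/check_analyzer_results.sh.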
    for file in $SERVER_METRICS_FILE $MODEL_METRICS_GPU_FILE $MODEL_METRICS_INFERENCE_FILE; do
check_no_csv_exists $file
if [ $? -ne 0 ]; then
echo -e "\n***\n*** Test Output Verification Failed.\n***"
cat $ANALYZER_LOG
RET=1
fi
done
fi
set -e

if [ $RET -eq 0 ]; then
echo -e "\n***\n*** Test PASSED\n***"
else
echo -e "\n***\n*** Test FAILED\n***"
fi

exit $RET
59 changes: 59 additions & 0 deletions qa/L0_quick_search/test_config_generator.py
@@ -0,0 +1,59 @@
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import yaml


class TestConfigGenerator:
"""
This class contains functions that
create configs for various test scenarios.
    The `setup` function does the work common to all tests.
TO ADD A TEST: Simply add a member function whose name starts
with 'generate'.
"""

def __init__(self):
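        # Discover every method whose name starts with 'generate' and run each
        # one after a fresh setup(), per the class docstring above.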
test_functions = [
self.__getattribute__(name)
for name in dir(self)
if name.startswith('generate')
]

for test_function in test_functions:
self.setup()
test_function()

def setup(self):
parser = argparse.ArgumentParser()
parser.add_argument('-m',
'--profile-models',
type=str,
required=True,
                            help='Comma-separated list of models to profile.')

args = parser.parse_args()
self.config = {}
self.config['profile_models'] = sorted(args.profile_models.split(','))

def generate_config(self):
with open('config.yml', 'w+') as f:
yaml.dump(self.config, f)


if __name__ == '__main__':
TestConfigGenerator()
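
Per the class docstring, a new scenario only needs another generate-prefixed method; __init__ discovers it automatically. A minimal sketch, shown as a subclass purely for illustration -- the method name, the latency_budget key, and the output file name are assumptions, not part of this commit; in practice the method would simply be added to TestConfigGenerator itself.

# Hypothetical illustration only -- not part of this commit.
import yaml


class TestConfigGeneratorWithBudget(TestConfigGenerator):

    def generate_config_with_latency_budget(self):
        # setup() has already populated self.config['profile_models'];
        # this assumed scenario adds a latency budget before dumping the file.
        self.config['latency_budget'] = 10
        with open('config-latency.yml', 'w+') as f:
            yaml.dump(self.config, f)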
