forked from intel/ai-reference-models
-
Notifications
You must be signed in to change notification settings - Fork 0
/
inference.sh
executable file
·92 lines (80 loc) · 3.15 KB
/
inference.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
#!/usr/bin/env bash
#
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Default the model source directory to the current working directory
# only when MODEL_DIR is unset (the `-` expansion keeps an empty value).
MODEL_DIR=${MODEL_DIR-$PWD}

# OUTPUT_DIR is required: logs and results are written there.
if [ -z "${OUTPUT_DIR}" ]; then
  echo "The required environment variable OUTPUT_DIR has not been set" >&2
  exit 1
fi

# Create the output directory in case it doesn't already exist
# (quoted so paths containing spaces work).
mkdir -p "${OUTPUT_DIR}"

# PRECISION selects which graph/config is used; it must be one of the
# three supported values checked below.
if [ -z "${PRECISION}" ]; then
  echo "The required environment variable PRECISION has not been set" >&2
  echo "Please set PRECISION to fp32, int8, or bfloat16." >&2
  exit 1
fi

if [[ $PRECISION != "fp32" && $PRECISION != "int8" && $PRECISION != "bfloat16" ]]; then
  echo "The specified precision '${PRECISION}' is unsupported." >&2
  echo "Supported precisions are: fp32, bfloat16, and int8" >&2
  exit 1
fi

# TF_MODELS_DIR must point at an existing clone of tensorflow/models;
# it is added to PYTHONPATH later in this script.
if [ -z "${TF_MODELS_DIR}" ]; then
  echo "The required environment variable TF_MODELS_DIR has not been set." >&2
  echo "Set TF_MODELS_DIR to the directory where the tensorflow/models repo has been cloned." >&2
  exit 1
fi

if [ ! -d "${TF_MODELS_DIR}" ]; then
  echo "The TF_MODELS_DIR directory '${TF_MODELS_DIR}' does not exist" >&2
  exit 1
fi
# Resolve the frozen-graph file. When the caller does not supply
# PRETRAINED_MODEL, pick the graph bundled under MODEL_DIR: int8 has a
# dedicated graph, while fp32 and bfloat16 share the fp32 graph.
if [[ -z "${PRETRAINED_MODEL}" ]]; then
  case "${PRECISION}" in
    int8)
      PRETRAINED_MODEL="${MODEL_DIR}/pretrained_model/ssd_resnet34_int8_bs1_pretrained_model.pb"
      ;;
    fp32|bfloat16)
      PRETRAINED_MODEL="${MODEL_DIR}/pretrained_model/ssd_resnet34_fp32_bs1_pretrained_model.pb"
      ;;
    *)
      echo "The specified precision '${PRECISION}' is unsupported."
      echo "Supported precisions are: fp32, bfloat16, and int8"
      exit 1
      ;;
  esac
  # The bundled graph must actually be present on disk.
  if [[ ! -f "${PRETRAINED_MODEL}" ]]; then
    echo "The pretrained model could not be found. Please set the PRETRAINED_MODEL env var to point to the frozen graph file."
    exit 1
  fi
elif [[ ! -f "${PRETRAINED_MODEL}" ]]; then
  # A caller-supplied path must also exist.
  echo "The file specified by the PRETRAINED_MODEL environment variable (${PRETRAINED_MODEL}) does not exist."
  exit 1
fi
# Make the tensorflow/models research code and the tf_cnn_benchmarks
# scripts importable by the benchmark launcher.
export PYTHONPATH=${PYTHONPATH}:${TF_MODELS_DIR}/research
export PYTHONPATH=${PYTHONPATH}:${TF_BENCHMARKS_DIR}/scripts/tf_cnn_benchmarks

# If batch size env is not mentioned, then the workload will run with the default batch size.
# BUG FIX: the original condition was `[ -z "${BATCH_SIZE}"]` — the missing
# space before `]` made `[` fail with a syntax error (status 2), so this
# branch never ran and --batch-size was later passed an empty value.
if [ -z "${BATCH_SIZE}" ]; then
  BATCH_SIZE="1"
  echo "Running with default batch size of ${BATCH_SIZE}"
fi
# utils.sh provides the _command helper used to echo and run the command.
source "${MODEL_DIR}/quickstart/common/utils.sh"

# Launch the ssd-resnet34 inference benchmark. All expansions are quoted
# so paths with spaces survive, and "$@" forwards any extra caller
# arguments as separate words (the original bare $@ re-split them).
_command python "${MODEL_DIR}/benchmarks/launch_benchmark.py" \
  --in-graph "${PRETRAINED_MODEL}" \
  --model-source-dir "${TF_MODELS_DIR}" \
  --model-name ssd-resnet34 \
  --framework tensorflow \
  --precision "${PRECISION}" \
  --mode inference \
  --socket-id 0 \
  --batch-size "${BATCH_SIZE}" \
  --benchmark-only \
  --output-dir "${OUTPUT_DIR}" \
  "$@"