From 1ff33ed6093f0bbd18ae46d260c2a2260cfba801 Mon Sep 17 00:00:00 2001 From: Arjun Suresh Date: Sun, 22 Sep 2024 02:52:04 +0530 Subject: [PATCH] MLPerf docker fixes and nltk version fix for gptj accuracy check --- .github/workflows/test-mlperf-inference-gptj.yml | 2 +- script/process-mlperf-accuracy/_cm.json | 10 ++++++---- script/run-mlperf-inference-app/customize.py | 4 +++- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-mlperf-inference-gptj.yml b/.github/workflows/test-mlperf-inference-gptj.yml index 41ce3413e..8847ff2eb 100644 --- a/.github/workflows/test-mlperf-inference-gptj.yml +++ b/.github/workflows/test-mlperf-inference-gptj.yml @@ -30,4 +30,4 @@ jobs: cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }} - name: Test MLPerf Inference GPTJ run: | - cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops + cm run script --tags=run-mlperf,inference,_submission,_short --submitter="MLCommons" --docker --model=gptj-99 --backend=${{ matrix.backend }} --device=cuda --scenario=Offline --test_query_count=1 --precision=${{ matrix.precision }} --target_qps=1 --quiet --docker_it=no --docker_cm_repo=gateoverflow@cm4mlops --adr.compiler.tags=gcc --beam_size=1 diff --git a/script/process-mlperf-accuracy/_cm.json b/script/process-mlperf-accuracy/_cm.json index 9af1a4f99..7acbd4adf 100644 --- a/script/process-mlperf-accuracy/_cm.json +++ b/script/process-mlperf-accuracy/_cm.json @@ -61,15 +61,17 @@ }, { "tags": "get,generic-python-lib,_package.nltk", - "names": + "names": [ "pip-package", "nltk" - ] + ], + "version_max": "3.8.1", + "version_max_usable": "3.8.1" }, { "tags": 
"get,generic-python-lib,_package.evaluate", - "names": + "names": [ "pip-package", "evaluate" @@ -77,7 +79,7 @@ }, { "tags": "get,generic-python-lib,_package.absl-py", - "names": + "names": [ "pip-package", "absl-py" diff --git a/script/run-mlperf-inference-app/customize.py b/script/run-mlperf-inference-app/customize.py index 7f566cf84..9bc3ead12 100644 --- a/script/run-mlperf-inference-app/customize.py +++ b/script/run-mlperf-inference-app/customize.py @@ -131,6 +131,7 @@ def preprocess(i): add_deps = inp.get('add_deps', {}) ad = inp.get('ad', {}) adr = inp.get('adr', {}) + docker_it = inp.get('docker_it', '') adr_from_meta = i['run_script_input'].get('add_deps_recursive') for key in adr_from_meta: @@ -226,7 +227,8 @@ def preprocess(i): r = cm.access(ii) if r['return'] > 0: return r - if action == "docker": + if action == "docker" and str(docker_it).lower() not in ["no", "false", "0"]: + print(f"\nStop Running loadgen scenario: {scenario} and mode: {mode}") return {'return': 0} # We run commands interactively inside the docker container if state.get('docker', {}):