diff --git a/.github/workflows/check-all-broken-links.md b/.github/workflows/check-all-broken-links.md
deleted file mode 100644
index 77f11786d..000000000
--- a/.github/workflows/check-all-broken-links.md
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Check .md README files for broken links
-
-on:
-  push: [main, mlperf-inference]
-
-jobs:
-  markdown-link-check:
-    runs-on: ubuntu-latest
-    # check out the latest version of the code
-    steps:
-    - uses: actions/checkout@v3
-
-    # Checks the status of hyperlinks in .md files in verbose mode
-    - name: Check links
-      uses: gaurav-nelson/github-action-markdown-link-check@v1
-      with:
-        use-quiet-mode: 'yes'
diff --git a/.github/workflows/test-cm-scripts.yml b/.github/workflows/test-cm-scripts.yml
index c0d96b972..e3c0c11a6 100644
--- a/.github/workflows/test-cm-scripts.yml
+++ b/.github/workflows/test-cm-scripts.yml
@@ -14,11 +14,12 @@ on:
 
 jobs:
   build:
-    runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
+        on: [ubuntu-latest, windows-latest, macos-latest]
         python-version: ["3.12", "3.9"]
+    runs-on: "${{ matrix.on }}"
 
     steps:
     - uses: actions/checkout@v3
diff --git a/.github/workflows/test-mlperf-inference-resnet50.yml b/.github/workflows/test-mlperf-inference-resnet50.yml
index 02478285a..12797a29e 100644
--- a/.github/workflows/test-mlperf-inference-resnet50.yml
+++ b/.github/workflows/test-mlperf-inference-resnet50.yml
@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
 
-name: MLPerf inference resnet50
+name: MLPerf inference ResNet50
 
 on:
   pull_request:
@@ -10,7 +10,6 @@ on:
       - '.github/workflows/test-mlperf-inference-resnet50.yml'
      - '**'
      - '!**.md'
-
 jobs:
   build:
     runs-on: ${{ matrix.os }}
@@ -33,7 +32,7 @@ jobs:
         - os: windows-latest
 
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
     - name: Set up Python ${{ matrix.python-version }}
       uses: actions/setup-python@v3
       with:
@@ -44,4 +43,4 @@ jobs:
         cm pull repo --url=${{ github.event.pull_request.head.repo.html_url }} --checkout=${{ github.event.pull_request.head.ref }}
     - name: Test MLPerf Inference ResNet50
       run: |
-        cm run script --tags=run,mlperf,inference,generate-run-cmds,_submission,_short --submitter="cTuning" --hw_name=default --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
+        cm run script --tags=run-mlperf,inference,_submission,_short --submitter="cTuning" --hw_name=default --model=resnet50 --implementation=${{ matrix.implementation }} --backend=${{ matrix.backend }} --device=cpu --scenario=Offline --test_query_count=500 --target_qps=1 -v --quiet
diff --git a/script/download-file/customize.py b/script/download-file/customize.py
index 31116579b..c8834f1ce 100644
--- a/script/download-file/customize.py
+++ b/script/download-file/customize.py
@@ -1,6 +1,6 @@
 from cmind import utils
 import os
-import hashlib
+import subprocess
 
 def preprocess(i):
 
@@ -20,6 +20,9 @@ def preprocess(i):
 
     q = '"' if os_info['platform'] == 'windows' else "'"
 
+    x='*' if os_info['platform'] == 'windows' else ''
+    x_c='-s' if os_info['platform'] == 'darwin_off' else ''
+
     if env.get('CM_DOWNLOAD_LOCAL_FILE_PATH'):
         filepath = env['CM_DOWNLOAD_LOCAL_FILE_PATH']
 
@@ -80,39 +83,80 @@ def preprocess(i):
 
         if tool == "cmutil":
             print ('')
+            cmutil_require_download = 0
+            if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '':
+ checksum_cmd = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}" + checksum_result = subprocess.run(checksum_cmd, cwd=f'{q}{filepath}{q}', capture_output=True, text=True, shell=True) + elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + checksum_cmd = f"echo {env.get('CM_DOWNLOAD_CHECKSUM')} {x}{q}{env['CM_DOWNLOAD_FILENAME']}{q} | md5sum -c{x_c} -" + checksum_result = subprocess.run(checksum_cmd, capture_output=True, text=True, shell=True) + if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '' or env.get('CM_DOWNLOAD_CHECKSUM', '') != '': + print(checksum_result) #for debugging + if "checksum did not match" in checksum_result.stderr.lower(): + computed_checksum = subprocess.run(f"md5sum {env['CM_DOWNLOAD_FILENAME']}", capture_output=True, text=True, shell=True).stdout.split(" ")[0] + print(f"WARNING: File already present, mismatch between original checksum({env.get('CM_DOWNLOAD_CHECKSUM')}) and computed checksum({computed_checksum}). Deleting the already present file and downloading new.") + try: + os.remove(env['CM_DOWNLOAD_FILENAME']) + print(f"File {env['CM_DOWNLOAD_FILENAME']} deleted successfully.") + except PermissionError: + return {"return":1, "error":f"Permission denied to delete file {env['CM_DOWNLOAD_FILENAME']}."} + cmutil_require_download = 1 + elif "no such file" in checksum_result.stderr.lower(): + print(f"No file {env['CM_DOWNLOAD_FILENAME']}. Downloading through cmutil.") + cmutil_require_download = 1 + else: + print(f"WARNING: File {env['CM_DOWNLOAD_FILENAME']} already present, original checksum and computed checksum matches! Skipping Download..") + else: + cmutil_require_download = 1 + + if cmutil_require_download == 1: + cm = automation.cmind + for i in range(1,5): + r = cm.access({'action':'download_file', + 'automation':'utils,dc2743f8450541e3', + 'url':url, + 'verify': verify_ssl}) + if r['return'] == 0: break + oldurl = url + url = env.get('CM_DOWNLOAD_URL'+str(i),'') + if url == '': + break + print(f"Download from {oldurl} failed, trying from {url}") + + if r['return']>0: return r + + env['CM_DOWNLOAD_CMD'] = "" + env['CM_DOWNLOAD_FILENAME'] = r['filename'] - cm = automation.cmind + elif tool == "wget": + if env.get('CM_DOWNLOAD_FILENAME', '') != '': + extra_download_options +=f" --tries=3 -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} " + env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}" for i in range(1,5): - r = cm.access({'action':'download_file', - 'automation':'utils,dc2743f8450541e3', - 'url':url, - 'verify': verify_ssl}) - if r['return'] == 0: break - oldurl = url url = env.get('CM_DOWNLOAD_URL'+str(i),'') if url == '': break - print(f"Download from {oldurl} failed, trying from {url}") - - if r['return']>0: return r - - env['CM_DOWNLOAD_CMD'] = "" - env['CM_DOWNLOAD_FILENAME'] = r['filename'] - - elif tool == "wget": - if env.get('CM_DOWNLOAD_FILENAME', '') != '': - extra_download_options +=f" -O {q}{env['CM_DOWNLOAD_FILENAME']}{q} " - env['CM_DOWNLOAD_CMD'] = f"wget -nc {extra_download_options} {url}" + env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && wget -nc {extra_download_options} {url})" elif tool == "curl": if env.get('CM_DOWNLOAD_FILENAME', '') != '': extra_download_options +=f" --output {q}{env['CM_DOWNLOAD_FILENAME']}{q} " env['CM_DOWNLOAD_CMD'] = f"curl {extra_download_options} {url}" + for i in range(1,5): + url = env.get('CM_DOWNLOAD_URL'+str(i),'') + if url == '': + break + env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && curl 
 
         elif tool == "gdown":
             env['CM_DOWNLOAD_CMD'] = f"gdown {extra_download_options} {url}"
+            for i in range(1,5):
+                url = env.get('CM_DOWNLOAD_URL'+str(i),'')
+                if url == '':
+                    break
+                env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && gdown {extra_download_options} {url})"
 
         elif tool == "rclone":
             if env.get('CM_RCLONE_CONFIG_CMD', '') != '':
@@ -127,6 +171,16 @@ def preprocess(i):
                 env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P"
             else:
                 env['CM_DOWNLOAD_CMD'] = f"rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P"
+            for i in range(1,5):
+                url = env.get('CM_DOWNLOAD_URL'+str(i),'')
+                if url == '':
+                    break
+                if env["CM_HOST_OS_TYPE"] == "windows":
+                    url = url.replace("%", "%%")
+                    temp_download_file = env['CM_DOWNLOAD_FILENAME'].replace("%", "%%")
+                    env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), temp_download_file)}{q} -P)"
+                else:
+                    env['CM_DOWNLOAD_CMD'] += f" || ((rm -f {env['CM_DOWNLOAD_FILENAME']} || true) && rclone {rclone_copy_using} {q}{url}{q} {q}{os.path.join(os.getcwd(), env['CM_DOWNLOAD_FILENAME'])}{q} -P)"
 
         filename = env['CM_DOWNLOAD_FILENAME']
         env['CM_DOWNLOAD_DOWNLOADED_FILENAME'] = filename
@@ -136,13 +190,11 @@ def preprocess(i):
 
     env['CM_DOWNLOAD_DOWNLOADED_PATH'] = filepath
 
-    x='*' if os_info['platform'] == 'windows' else ''
-    x_c=' -s ' if os_info['platform'] == 'darwin_off' else '' #not using this option for now
-    #verify checksum if file already present
+    # verify checksum if file already present
     if env.get('CM_DOWNLOAD_CHECKSUM_FILE', '') != '':
-        env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c{x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
+        env['CM_DOWNLOAD_CHECKSUM_CMD'] = f"cd {q}{filepath}{q} {xsep} md5sum -c {x_c} {x}{q}{env['CM_DOWNLOAD_CHECKSUM_FILE']}{q}"
     elif env.get('CM_DOWNLOAD_CHECKSUM', '') != '':
-        env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {}-c -".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
+        env['CM_DOWNLOAD_CHECKSUM_CMD'] = "echo {} {}{}{}{} | md5sum {} -c -".format(env.get('CM_DOWNLOAD_CHECKSUM'), x, q, env['CM_DOWNLOAD_FILENAME'], q, x_c)
     else:
         env['CM_DOWNLOAD_CHECKSUM_CMD'] = ""
 
diff --git a/script/get-dataset-coco2014/run.sh b/script/get-dataset-coco2014/run.sh
index 31c60b52a..f37ba603b 100644
--- a/script/get-dataset-coco2014/run.sh
+++ b/script/get-dataset-coco2014/run.sh
@@ -16,6 +16,13 @@ if [[ ${CM_DATASET_CALIBRATION} == "no" ]]; then
 else
   max_images=""
 fi
+
+# deleting existing incomplete downloads if any
+if [ -f "${INSTALL_DIR}/download_aux/annotations_trainval2014.zip" ]; then
+  echo "File annotations_trainval2014.zip already exists. Deleting it."
+  rm ${INSTALL_DIR}/download_aux/annotations_trainval2014.zip
+fi
+
 cmd="./download-coco-2014.sh -d ${INSTALL_DIR} ${max_images}"
 echo $cmd
 eval $cmd
diff --git a/script/get-dataset-imagenet-aux/run.bat b/script/get-dataset-imagenet-aux/run.bat
deleted file mode 100644
index f045ee689..000000000
--- a/script/get-dataset-imagenet-aux/run.bat
+++ /dev/null
@@ -1,16 +0,0 @@
-echo.
-
-wget -nc %CM_WGET_URL% --no-check-certificate
-IF %ERRORLEVEL% NEQ 0 EXIT 1
-
-mkdir data
-
-gzip -d caffe_ilsvrc12.tar.gz
-IF %ERRORLEVEL% NEQ 0 EXIT 1
-
-tar -C data -xvf caffe_ilsvrc12.tar
-IF %ERRORLEVEL% NEQ 0 EXIT 1
-
-del /Q /S caffe_ilsvrc12.tar
-
-echo CM_DATASET_AUX_PATH=%CD%\data > tmp-run-env.out
diff --git a/script/get-dataset-kits19/_cm.json b/script/get-dataset-kits19/_cm.json
index 816a54459..3e4f95548 100644
--- a/script/get-dataset-kits19/_cm.json
+++ b/script/get-dataset-kits19/_cm.json
@@ -15,6 +15,13 @@
   "deps": [
     {
       "tags": "detect,os"
+    },
+    {
+      "names": [
+        "python3",
+        "python"
+      ],
+      "tags": "get,python3"
     }
   ],
   "new_env_keys": [
diff --git a/script/get-generic-sys-util/_cm.json b/script/get-generic-sys-util/_cm.json
index 63dc917dc..856ed0b65 100644
--- a/script/get-generic-sys-util/_cm.json
+++ b/script/get-generic-sys-util/_cm.json
@@ -195,7 +195,7 @@
       "state": {
         "libffi_dev": {
           "apt": "libffi-dev",
-          "brew": "libffi-dev",
+          "brew": "libffi",
           "dnf": "libffi-devel",
           "yum": "libffi-devel"
         }
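
Illustration (not part of the patch): the download-file changes above repeatedly build a shell command of the form "<tool> <url> || ((rm -f <file> || true) && <tool> <fallback-url>)", chaining up to four fallback URLs taken from CM_DOWNLOAD_URL1..CM_DOWNLOAD_URL4. A minimal Python sketch of that pattern is shown below; the function name and the env dict are hypothetical and only mirror the logic added for wget, curl and gdown.

# Sketch: build a download command with fallback mirrors, mirroring the patch's retry chaining.
def build_download_cmd_with_fallbacks(env, filename, primary_url, tool="wget", extra_options="-nc"):
    """Return a shell command that retries the download from fallback URLs on failure."""
    cmd = f"{tool} {extra_options} {primary_url}"
    for i in range(1, 5):
        fallback_url = env.get('CM_DOWNLOAD_URL' + str(i), '')
        if fallback_url == '':
            break
        # On failure, remove any partial file and retry with the next URL.
        cmd += f" || ((rm -f {filename} || true) && {tool} {extra_options} {fallback_url})"
    return cmd

# Example (hypothetical values):
# env = {'CM_DOWNLOAD_URL1': 'https://mirror.example.com/model.onnx'}
# print(build_download_cmd_with_fallbacks(env, 'model.onnx', 'https://example.com/model.onnx'))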