From 37966aec35c73614da130c945e9bd4f14cb424fb Mon Sep 17 00:00:00 2001 From: Unbinilium <15633984+Unbinilium@users.noreply.github.com> Date: Fri, 7 Jul 2023 23:27:02 +0800 Subject: [PATCH 1/4] docs: update cli arguments --- notebooks/Google-Colab-PFLD-Grove-Example.ipynb | 14 ++++++-------- .../Google-Colab-YOLOv5-A1101-Example.ipynb | 16 +++++++--------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/notebooks/Google-Colab-PFLD-Grove-Example.ipynb b/notebooks/Google-Colab-PFLD-Grove-Example.ipynb index 0338c0a3..09f1ec78 100644 --- a/notebooks/Google-Colab-PFLD-Grove-Example.ipynb +++ b/notebooks/Google-Colab-PFLD-Grove-Example.ipynb @@ -279,13 +279,12 @@ "outputs": [], "source": [ "!${PYTHON_EXEC} tools/train.py \\\n", - " pose \\\n", " configs/pfld/pfld_mbv2n_112.py \\\n", " --cfg-options \\\n", " epochs=50 \\\n", " num_classes=1 \\\n", " data_root='datasets/meter' \\\n", - " load_from=pre-train/pfld_mv2n_112.pth " + " load_from='pre-train/pfld_mv2n_112.pth'" ] }, { @@ -362,8 +361,7 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/test.py \\\n", - " pose \\\n", + "!${PYTHON_EXEC} tools/inference.py \\\n", " configs/pfld/pfld_mbv2n_112.py \\\n", " \"$(cat work_dirs/pfld_mbv2n_112/last_checkpoint)\" \\\n", " --dump work_dirs/pfld_mbv2n_112/last_checkpoint.pkl \\\n", @@ -425,10 +423,10 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/torch2tflite.py \\\n", + "!${PYTHON_EXEC} tools/export.py \\\n", " configs/pfld/pfld_mbv2n_112.py \\\n", - " --checkpoint $(cat work_dirs/pfld_mbv2n_112/last_checkpoint) \\\n", - " --type int8 \\\n", + " $(cat work_dirs/pfld_mbv2n_112/last_checkpoint) \\\n", + " tflite \\\n", " --cfg-options \\\n", " data_root='datasets/meter'" ] @@ -650,7 +648,7 @@ "!cd example/grove && \\\n", " ${PYTHON_EXEC} tools/ufconv/uf2conv.py \\\n", " -t 1 \\\n", - " -c \"$(cat ../../work_dirs/pfld_mbv2n_112/last_checkpoint)_int8.tflite\" \\\n", + " -c \"$(cat ../../work_dirs/pfld_mbv2n_112/last_checkpoint | sed -e 's/.pth/_int8.tflite/g')\" \\\n", " -o model.uf2" ] }, diff --git a/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb b/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb index 3fd9a9b7..9c245ef0 100644 --- a/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb +++ b/notebooks/Google-Colab-YOLOv5-A1101-Example.ipynb @@ -204,7 +204,7 @@ "\n", "- `data_root` - the datasets path, which located at path `datasets/digital_meter`\n", "\n", - "- `max_epochs`- the train epochs, we use `50` to reduce the training time\n", + "- `epochs`- the train epochs, we use `50` to reduce the training time\n", "\n", "- `num_classes` - the calsses number of datasets, we use `11` here ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'N']\n", "\n", @@ -220,10 +220,9 @@ "outputs": [], "source": [ "!${PYTHON_EXEC} tools/train.py \\\n", - " det \\\n", " configs/yolov5/yolov5_tiny_1xb16_300e_coco.py \\\n", " --cfg-options \\\n", - " max_epochs=50 \\\n", + " epochs=50 \\\n", " num_classes=11 \\\n", " data_root='datasets/digital_meter/'" ] @@ -290,8 +289,7 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/test.py \\\n", - " det \\\n", + "!${PYTHON_EXEC} tools/inference.py \\\n", " configs/yolov5/yolov5_tiny_1xb16_300e_coco.py \\\n", " \"$(cat work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint)\" \\\n", " --dump work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint.pkl \\\n", @@ -345,10 +343,10 @@ }, "outputs": [], "source": [ - "!${PYTHON_EXEC} tools/torch2tflite.py \\\n", + "!${PYTHON_EXEC} tools/export.py \\\n", " configs/yolov5/yolov5_tiny_1xb16_300e_coco.py 
\\\n", - " --checkpoint $(cat work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint) \\\n", - " --type int8 \\\n", + " $(cat work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint) \\\n", + " tflite \\\n", " --cfg-options \\\n", " data_root='datasets/digital_meter/' \\\n", " num_classes=11" @@ -540,7 +538,7 @@ "!cd example/grove && \\\n", " ${PYTHON_EXEC} tools/ufconv/uf2conv.py \\\n", " -t 18 \\\n", - " -c \"$(cat ../../work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint)_int8.tflite\" \\\n", + " -c \"$(cat ../../work_dirs/yolov5_tiny_1xb16_300e_coco/last_checkpoint | sed -e 's/.pth/_int8.tflite/g')\" \\\n", " -o model.uf2" ] }, From 81aa67aa4b00a47125c4576f7e904af708195cf4 Mon Sep 17 00:00:00 2001 From: Unbinilium <15633984+Unbinilium@users.noreply.github.com> Date: Fri, 7 Jul 2023 23:40:36 +0800 Subject: [PATCH 2/4] ci: fix data root issue, reduce train epochs --- scripts/test_functional.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/test_functional.sh b/scripts/test_functional.sh index 3eb195e2..b346ca9d 100644 --- a/scripts/test_functional.sh +++ b/scripts/test_functional.sh @@ -47,7 +47,7 @@ functional_test_core() CONFIG_FILE_NAME="$(basename -- ${CONFIG_FILE})" DATASETS_PATH="datasets/$(basename -- ${DATASETS_URL})" - DATASETS_DIR="${DATASETS_PATH%.*}" + DATASETS_DIR="${DATASETS_PATH%.*}/" LAST_CHECKPOINT="work_dirs/${CONFIG_FILE_NAME%.*}/last_checkpoint" echo -e "CONFIG_FILE=${CONFIG_FILE}" @@ -67,7 +67,7 @@ functional_test_core() --no-validate \ --cfg-options \ data_root="${DATASETS_DIR}" \ - max_epochs=10 + epochs=3 return $? ;; "export") @@ -76,7 +76,7 @@ functional_test_core() "${CONFIG_FILE}" \ "$(cat ${LAST_CHECKPOINT})" \ tflite onnx \ - --calibration_epochs 1 \ + --calibration-epochs 1 \ --cfg-options \ data_root="${DATASETS_DIR}" return $? 
From cc80e1a28b5ce9a23bd34305e2cafa9a8bedf5fc Mon Sep 17 00:00:00 2001 From: Unbinilium <15633984+Unbinilium@users.noreply.github.com> Date: Fri, 7 Jul 2023 23:41:01 +0800 Subject: [PATCH 3/4] ci: add PR stage tests --- .github/workflows/code-lint.yml | 11 +++++++++-- .github/workflows/docs-build.yml | 2 -- .github/workflows/functional-test.yml | 25 ++++++++++++++++++++++--- .github/workflows/smoke-test.yml | 11 ++++++----- 4 files changed, 37 insertions(+), 12 deletions(-) diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index e0450861..21b4dd89 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -3,7 +3,7 @@ name: code-lint on: push: branches: - - dev # should be 'main' later + - main paths: - 'configs/**' - 'edgelab/**' @@ -14,7 +14,14 @@ on: pull_request: branches: - - dev # should be 'main' later + - dev + paths: + - 'configs/**' + - 'edgelab/**' + - 'tools/**' + - 'pyproject.toml' + - '.pre-commit-config.yaml' + - '.github/workflows/code-lint.yml' concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/.github/workflows/docs-build.yml b/.github/workflows/docs-build.yml index e73fc003..8acec06a 100644 --- a/.github/workflows/docs-build.yml +++ b/.github/workflows/docs-build.yml @@ -21,8 +21,6 @@ jobs: steps: - name: checkout repository uses: actions/checkout@v3 - with: - ref: 'main' # should be main later - name: setup node uses: actions/setup-node@v3 diff --git a/.github/workflows/functional-test.yml b/.github/workflows/functional-test.yml index 6029a7a9..eee5ec01 100644 --- a/.github/workflows/functional-test.yml +++ b/.github/workflows/functional-test.yml @@ -3,7 +3,7 @@ name: functional-test on: push: branches: - - dev # should be 'main' later + - main paths: - 'configs/**' - 'edgelab/**' @@ -17,6 +17,27 @@ on: - 'setup.py' - '.github/workflows/functional-test.yml' + pull_request: + branches: + - dev + paths: + - 'configs/**' + - 'edgelab/**' + - 'requirements/**' + - 'scripts/**' + - 'tools/**' + - 'environment.yml' + - 'environment_cuda.yml' + - 'requirements.txt' + - 'requirements_cuda.txt' + - 'setup.py' + - '.github/workflows/functional-test.yml' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + + jobs: partial-test: name: functional test for (${{ matrix.task }}, ${{ matrix.os }}) @@ -37,8 +58,6 @@ jobs: steps: - name: checkout repository uses: actions/checkout@v3 - with: - ref: dev # should be main later - name: setup python uses: actions/setup-python@v4 diff --git a/.github/workflows/smoke-test.yml b/.github/workflows/smoke-test.yml index 38d8a935..c55d63c8 100644 --- a/.github/workflows/smoke-test.yml +++ b/.github/workflows/smoke-test.yml @@ -3,7 +3,8 @@ name: smoke-test on: push: branches: - - dev # should be 'main' later + - dev + - main paths: - 'requirements/**' - 'scripts/**' @@ -17,6 +18,10 @@ on: schedule: - cron: '0 0 * * *' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: manual-installation: name: manual installation test for (${{ matrix.python-version }}, ${{ matrix.os }}) @@ -39,8 +44,6 @@ jobs: steps: - name: checkout repository uses: actions/checkout@v3 - with: - ref: dev # should be main later - name: setup python uses: actions/setup-python@v4 @@ -73,8 +76,6 @@ jobs: steps: - name: checkout repository uses: actions/checkout@v3 - with: - ref: dev # should be main later - name: setup deps run: | From a74fe8c0318571605cb9973ba97edfd821bdccd3 Mon Sep 17 00:00:00 2001 From: Unbinilium 
<15633984+Unbinilium@users.noreply.github.com> Date: Fri, 7 Jul 2023 23:46:34 +0800 Subject: [PATCH 4/4] fix: use epochs instead of max_epochs --- configs/yolov5/base_arch.py | 6 +++--- configs/yolox/base_arch.py | 6 +++--- docs/tutorials/training/fomo.md | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/configs/yolov5/base_arch.py b/configs/yolov5/base_arch.py index ffa53167..ec979a3b 100644 --- a/configs/yolov5/base_arch.py +++ b/configs/yolov5/base_arch.py @@ -29,7 +29,7 @@ # -----train val related----- # Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs base_lr = 0.01 -max_epochs = 300 # Maximum training epochs +epochs = 300 # Maximum training epochs model_test_cfg = dict( # The config of multi-label for multi-class prediction. @@ -239,7 +239,7 @@ default_hooks = dict( param_scheduler=dict( - type='YOLOv5ParamSchedulerHook', scheduler_type='linear', lr_factor=lr_factor, max_epochs=max_epochs + type='YOLOv5ParamSchedulerHook', scheduler_type='linear', lr_factor=lr_factor, max_epochs=epochs ), checkpoint=dict( type='CheckpointHook', interval=save_checkpoint_intervals, save_best='auto', max_keep_ckpts=max_keep_ckpts @@ -258,7 +258,7 @@ test_evaluator = val_evaluator train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=save_checkpoint_intervals, _delete_=True + type='EpochBasedTrainLoop', max_epochs=epochs, val_interval=save_checkpoint_intervals, _delete_=True ) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop') diff --git a/configs/yolox/base_arch.py b/configs/yolox/base_arch.py index 7a39436d..f8af088f 100644 --- a/configs/yolox/base_arch.py +++ b/configs/yolox/base_arch.py @@ -29,7 +29,7 @@ # -----train val related----- # Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs base_lr = 0.01 -max_epochs = 300 # Maximum training epochs +epochs = 300 # Maximum training epochs model_test_cfg = dict( # The config of multi-label for multi-class prediction. @@ -292,7 +292,7 @@ type="YOLOv5ParamSchedulerHook", scheduler_type="linear", lr_factor=lr_factor, - max_epochs=max_epochs, + max_epochs=epochs, ), checkpoint=dict( type="CheckpointHook", @@ -323,7 +323,7 @@ train_cfg = dict( type="EpochBasedTrainLoop", - max_epochs=max_epochs, + max_epochs=epochs, val_interval=save_checkpoint_intervals, _delete_=True, ) diff --git a/docs/tutorials/training/fomo.md b/docs/tutorials/training/fomo.md index 0f54dfbf..b7e3e977 100644 --- a/docs/tutorials/training/fomo.md +++ b/docs/tutorials/training/fomo.md @@ -103,7 +103,7 @@ optim_wrapper=dict(optimizer=dict(type='Adam', lr=lr, weight_decay=5e-4,eps=1e-7 #evaluator val_evaluator=dict(type='FomoMetric') test_evaluator=val_evaluator -train_cfg=dict(by_epoch=True,max_epochs=70) +train_cfg=dict(by_epoch=True, max_epochs=70) # learning policy param_scheduler=[
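Patch 4 only renames the user-facing config variable to `epochs`; the MMEngine hooks and loops keep their `max_epochs` keys and receive `epochs` as their value, which is what lets the notebooks and the functional-test script override the training length with `--cfg-options epochs=...`. A reduced sketch of the pattern from `configs/yolov5/base_arch.py` (the literal values below stand in for the `lr_factor` and `save_checkpoint_intervals` variables used in the full config):

    # `epochs` is the knob users override from the CLI, e.g.
    #   python tools/train.py configs/yolov5/yolov5_tiny_1xb16_300e_coco.py \
    #       --cfg-options epochs=50 num_classes=11 data_root='datasets/digital_meter/'
    epochs = 300  # maximum training epochs

    # MMEngine still expects the key `max_epochs`, so the config forwards
    # `epochs` to it rather than renaming the framework-level keys.
    default_hooks = dict(
        param_scheduler=dict(
            type='YOLOv5ParamSchedulerHook',
            scheduler_type='linear',
            lr_factor=0.01,    # placeholder for `lr_factor`
            max_epochs=epochs,
        ),
    )

    train_cfg = dict(
        type='EpochBasedTrainLoop',
        max_epochs=epochs,
        val_interval=5,        # placeholder for `save_checkpoint_intervals`
        _delete_=True,
    )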