diff --git a/.github/workflows/qase-sync.yaml b/.github/workflows/qase-sync.yaml
index 8eba9580bf..f7e0221eab 100644
--- a/.github/workflows/qase-sync.yaml
+++ b/.github/workflows/qase-sync.yaml
@@ -101,13 +101,29 @@ jobs:
           for file_path in "${test_cases[@]}"; do
+            if [[ ! -e "${file_path}" ]]; then
+              echo "${file_path} has been deleted"
+              git checkout HEAD^ -- "${file_path}"
+              delete_test_case=true
+            fi
+
             title=$(grep '^title:' ${file_path} | sed 's/title: //g' | sed 's/"/\\"/g')
             echo "got test case title: ${title}"
             description=$(sed -z 's/\n/\\n/g' ${file_path} | sed 's/ \\/ \\\\/g' | sed 's/"/\\"/g')
             echo "got test case description: ${description}"
             res=$(curl -s --request GET --url "https://api.qase.io/v1/case/LH" --get --data-urlencode "search=${title}" --header "Token: ${token}" --header "accept: application/json")
-            if [[ $(echo $res | jq .result.count) -ne "0" ]]; then
+            if [[ $(echo $res | jq .result.count) -ne "0" ]] && [[ ${delete_test_case} ]]; then
+              # delete existing test case
+              test_case_id=$(echo $res | jq .result.entities[0].id)
+
+              res=$(curl --request DELETE -s \
+                --url "https://api.qase.io/v1/case/LH/${test_case_id}" \
+                --header "Token: ${token}" \
+                --header "accept: application/json")
+
+              echo "deleted existing test case: ${res}"
+            elif [[ $(echo $res | jq .result.count) -ne "0" ]]; then
               # update existing test case
               test_case_id=$(echo $res | jq .result.entities[0].id)
diff --git a/docs/content/manual/pre-release/backup-and-restore/concurrent-backup.md b/docs/content/manual/pre-release/backup-and-restore/concurrent-backup.md
deleted file mode 100644
index ffcfc03237..0000000000
--- a/docs/content/manual/pre-release/backup-and-restore/concurrent-backup.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "[#1341](https://github.com/longhorn/longhorn/issues/1341) concurrent backup test"
----
-- Take a manual backup of the volume `bak` while a recurring backup is running
-- verify that backup got created
-- verify that backup sticks around even when recurring backups are cleaned up
diff --git a/docs/content/manual/pre-release/cluster-restore/restore-to-a-new-cluster.md b/docs/content/manual/pre-release/cluster-restore/restore-to-a-new-cluster.md
index 963322c360..a3113c9fa7 100644
--- a/docs/content/manual/pre-release/cluster-restore/restore-to-a-new-cluster.md
+++ b/docs/content/manual/pre-release/cluster-restore/restore-to-a-new-cluster.md
@@ -3,7 +3,7 @@ title: Restore to a new cluster
 ---
 #### Back up the old cluster
-1. Deploy the 1st cluster then install Longhorn system and Velero.
+1. Deploy the 1st cluster then install Longhorn system and Velero.
 2. Deploy some workloads using Longhorn volumes then write some data:
    1. A simple pod using multiple volumes. And some volumes are using backing images.
    2. A StatefulSet.