diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index b1ab6b8e6a..3e239f5e31 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -20,3 +20,4 @@ jobs:
with:
check_filenames: true
skip: "*/**.yaml,*/**.yml,./scripts,./vendor,MAINTAINERS,LICENSE,go.mod,go.sum"
+ ignore_words_list: aks
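For context, the resulting codespell step would look roughly like the sketch below; only the `with:` entries come from this hunk, while the runner, checkout step, and action reference are assumptions about parts of the workflow not shown here:
```yaml
jobs:
  codespell:
    runs-on: ubuntu-latest                                # assumed runner, not shown in this hunk
    steps:
      - uses: actions/checkout@v3                         # assumed checkout step
      - uses: codespell-project/actions-codespell@master  # assumed action reference
        with:
          check_filenames: true
          skip: "*/**.yaml,*/**.yml,./scripts,./vendor,MAINTAINERS,LICENSE,go.mod,go.sum"
          # "aks" (Azure Kubernetes Service) is a valid term, not a misspelling of "asks"
          ignore_words_list: aks
```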
diff --git a/build_engine_test_images/terraform/aws/ubuntu/main.tf b/build_engine_test_images/terraform/aws/ubuntu/main.tf
index 82dab61c9f..70ece8d133 100644
--- a/build_engine_test_images/terraform/aws/ubuntu/main.tf
+++ b/build_engine_test_images/terraform/aws/ubuntu/main.tf
@@ -99,7 +99,7 @@ resource "aws_route_table" "build_engine_aws_public_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "build_engine_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.build_engine_aws_public_subnet,
diff --git a/docs/content/manual/functional-test-cases/backup.md b/docs/content/manual/functional-test-cases/backup.md
index 9795af967c..ea366600e9 100644
--- a/docs/content/manual/functional-test-cases/backup.md
+++ b/docs/content/manual/functional-test-cases/backup.md
@@ -28,7 +28,7 @@ Backup create operations test cases
|-----| --- | --- | --- |
| 1 | Create backup from existing snapshot | **Prerequisite:**
* Backup target is set to NFS server, or S3 compatible target.
1. Create a workload using Longhorn volume
2. Write data to volume, compute it’s checksum (checksum#1)
3. Create a snapshot (snapshot#1)
4. Create a backup from (snapshot#1)
5. Restore backup to a different volume
6. Attach volume to a node and check it’s data, and compute it’s checksum | * Backup should be created
* Restored volume data checksum should match (checksum#1) |
| 2 | Create volume backup for a volume attached to a node | **Prerequisite:**
* Backup target is set to NFS server, or S3 compatible target.
1. Create a volume, attach it to a node
2. Format volume using ext4/xfs filesystem and mount it to a directory on the node
3. Write data to volume, compute it’s checksum (checksum#1)
4. Create a backup
5. Restore backup to a different volume
6. Attach volume to a node and check it’s data, and compute it’s checksum
7. Check volume backup labels | * Backup should be created
* Restored volume data checksum should match (checksum#1)
* backup should have no backup labels |
-| 3 | Create volume backup used by Kubernetes workload | **Prerequisite:**
* Backup target is set to NFS server, or S3 compatible target.
1. Create a deployment workload with `nReplicas = 1` using Longhorn volume
2. Write data to volume, compute it’s checksum (checksum#1)
3. Create a backup
4. Check backup labels
5. Scale down deployment `nReplicas = 0`
6. Delete Longhorn volume
7. Restore backup to a volume with the same deleted volume name
8. Scale back deployment `nReplicas = 1`
9. Check volume data checksum | * Backup labels should contain the following informations about workload that was using the volume at time of backup.
* Namespace
* PV Name
* PVC Name
* PV Status
* Workloads Status
* Pod Name
Workload Name
Workload Type
Pod Status
* After volume restore, data checksum should match (checksum#1) |
+| 3 | Create volume backup used by Kubernetes workload | **Prerequisite:**
* Backup target is set to NFS server, or S3 compatible target.
1. Create a deployment workload with `nReplicas = 1` using Longhorn volume
2. Write data to volume, compute its checksum (checksum#1)
3. Create a backup
4. Check backup labels
5. Scale down deployment `nReplicas = 0`
6. Delete Longhorn volume
7. Restore backup to a volume with the same deleted volume name
8. Scale back deployment `nReplicas = 1`
9. Check volume data checksum | * Backup labels should contain the following information about the workload that was using the volume at the time of backup.
* Namespace
* PV Name
* PVC Name
* PV Status
* Workloads Status
* Pod Name
Workload Name
Workload Type
Pod Status
* After volume restore, data checksum should match (checksum#1) |
| 4 | Create volume backup with customized labels | **Prerequisite:**
* Backup target is set to NFS server, or S3 compatible target.
1. Create a volume, attach it to a node
2. Create a backup, add customized labels
key: `K1` value: `V1`
3. Check volume backup labels | * Backup should be created with customized labels |
| 5 | Create recurring backups | 1. Create a deployment workload with `nReplicas = 1` using Longhorn volume
2. Write data to volume , compute it’s checksum (checksum#1)
3. Create a recurring backup `every 5 minutes`. and set retain count to `5`
4. add customized labels key: `K1` value: `V1`
5. Wait for recurring backup to triggered (backup#1, backup#2 )
6. Scale down deployment `nReplicas = 0`
7. Delete the volume.
8. Restore backup to a volume with the same deleted volume name
9. Scale back deployment `nReplicas = 1`
10. Check volume data checksum | * backups should be created with Kubernetes status labels and customized labels
* After volume restore, data checksum should match (checksum#1)
* after restoring the backup recurring backups should continue to be created |
| 6 | Backup created using Longhorn behind proxy | **Prerequisite:**
* Setup a Proxy on an instance (Optional: use squid)
* Create a single node cluster in EC2
* Deploy Longhorn
1. Block outgoing traffic except for the proxy instance.
2. Create AWS secret in longhorn.
3. In UI Settings page, set backupstore target and backupstore credential secret
4. Create a volume, attach it to a node, format the volume, and mount it to a directory.
5. Write some data to the volume, and create a backup. | * Ensure backup is created |
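Scenario 6 above requires an AWS credential secret that also carries the proxy settings. A sketch of such a secret is below; the key names follow the Longhorn backup-target documentation, and the values (proxy address, port, NO_PROXY list) are placeholders rather than values from this test case:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: aws-secret
  namespace: longhorn-system
type: Opaque
stringData:
  AWS_ACCESS_KEY_ID: <access-key>          # placeholder
  AWS_SECRET_ACCESS_KEY: <secret-key>      # placeholder
  HTTP_PROXY: "http://<proxy-ip>:3128"     # squid default port, assumed
  HTTPS_PROXY: "http://<proxy-ip>:3128"
  NO_PROXY: "localhost,127.0.0.1,0.0.0.0,10.0.0.0/8,192.168.0.0/16"
```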
@@ -99,7 +99,7 @@ Disaster Recovery test cases
| DR volume across the cluster #5 | Cluster A:
* Create volume Y
* Attach the volume Y
* Create a backup of Y
Cluster B:
* Backup Volume list page, click \`Create Disaster Recovery Volume\` from volume dropdown
* Create two DR volumes Ydr1 and Ydr2.
* Attach the volume Y to any node
* Mount the volume Y on the node
* Write a file of 10Mb into it, use \`/dev/urandom\` to generate the file
* Calculate the checksum of the file
* Make a Backup
* Attach Ydr1 and Ydr2 to any nodes | * DR volume's last backup should be updated automatically, after settings.BackupPollInterval passed.
* DR volume.LastBackup should be different from DR volume's controller\[0\].LastRestoredBackup temporarily (it's restoring the last backup)
* During the restoration, DR volume cannot be activated.
* Eventually, DR volume.LastBackup should equal to controller\[0\].LastRestoredBackup. |
| DR volume across the cluster #6 | \[follow #5\]
Cluster A:
* In the directory mounted volume Y, write a new file of 100Mb.
* Record the checksum of the file
* Create a backup of volume Y
Cluster B:
* Wait for restoration of volume Ydr1 and Ydr2 to complete
* Activate Ydr1
* Attach it to one node and verify the content | * DR volume's last backup should be updated automatically, after settings.BackupPollInterval passed.
* Eventually, DR volume.LastBackup should equal to controller\[0\].LastRestoredBackup.
* Ydr1 should have the same file checksum of volume Y |
| DR volume across the cluster #7 | \[follow #6\]
Cluster A
* In the directory mounted volume Y, remove all the files. Write a file of 50Mb
* Record the checksum of the file
Cluster B
* Change setting.BackupPollInterval to longer e.g. 1h
Cluster A
* Create a backup of volume Y
Cluster B
\[DO NOT CLICK BACKUP PAGE, which will update last backup as a side effect\]
* Before Ydr2's last backup updated, activate Ydr2 | * YBdr2's last backup should be immediately updated to the last backup of volume Y
* Activate should fail due to restoration is in progress | When user clicks on “activate DRV”, restoration happens
And the volume goes into detached state |
-| DR volume across the cluster #8 | Cluster A
* Create volume Z
* Attach the volume Z
* Create a backup of Z
Cluster B
* Backup Volume list page, click \`Create Disaster Recovery Volume\` from volume dropdown
* Create DR volumes Zdr1, Zdr2 and Zdr3
* Attach the volume Zdr1, Zdr2 and Zdr3 to any node
* Change setting.BackupPollInterval to approriate interval for multiple backups e.g. 15min
* Make sure LastBackup of Zdr is consistent with that of Z
Cluster A
* Create multiple backups for volume Z before Zdr's last backup updated. For each backup, write or modify at least one file then record the cheksum.
Cluster B
* Wait for restoration of volume Zdr1 to complete
* Activate Zdr1
* Attach it to one node and verify the content | * Zdr1's last backup should be updated after settings.BackupPollInterval passed.
* Zdr1 should have the same files with the the same checksums of volume Z |
+| DR volume across the cluster #8 | Cluster A
* Create volume Z
* Attach the volume Z
* Create a backup of Z
Cluster B
* Backup Volume list page, click \`Create Disaster Recovery Volume\` from volume dropdown
* Create DR volumes Zdr1, Zdr2 and Zdr3
* Attach the volume Zdr1, Zdr2 and Zdr3 to any node
* Change setting.BackupPollInterval to an appropriate interval for multiple backups, e.g. 15min
* Make sure LastBackup of Zdr is consistent with that of Z
Cluster A
* Create multiple backups for volume Z before Zdr's last backup is updated. For each backup, write or modify at least one file, then record the checksum.
Cluster B
* Wait for restoration of volume Zdr1 to complete
* Activate Zdr1
* Attach it to one node and verify the content | * Zdr1's last backup should be updated after settings.BackupPollInterval passed.
* Zdr1 should have the same files with the same checksums as volume Z |
| DR volume across the cluster #9 | \[follow #8\]
Cluster A
* Delete the latest backup of Volume Z | * Last backup of Zdr2 and Zdr3 should be empty after settings.BackupPollInterval passed. Field controller\[0\].LastRestoredBackup and controller\[0\].RequestedBackupRestore should retain. |
| DR volume across the cluster #10 | \[follow #9\]
Cluster B
* Activate Zdr2
* Attach it to one node and verify the content | * Zdr2 should have the same files with the the same checksums of volume Z | |
| DR volume across the cluster #11 | \[follow #10\]
Cluster A
* Create one more backup with at least one file modified.
Cluster B
* Wait for restoration of volume Zdr3 to complete
* Activate Zdr3
* Attach it to one node and verify the content | * Zdr3 should have the same files with the the same checksums of volume Z |
@@ -150,7 +150,7 @@ The setup requirements:
| 4 | Delete the backup with `DeletionPolicy` as delete | 1. Repeat the steps from test scenario 1.
2. Delete the `VolumeSnapshot` using `kubectl delete volumesnapshots test-snapshot-pvc` | 1. The `VolumeSnapshot` should be deleted.
2. By default the `DeletionPolicy` is delete, so the `VolumeSnapshotContent` should be deleted.
3. Verify in the backup store, the backup should be deleted. |
| 5 | Delete the backup with `DeletionPolicy` as retain | 1. Create a `VolumeSnapshotClass` class with `deletionPolicy` as Retain
kind: VolumeSnapshotClass
apiVersion: snapshot.storage.k8s.io/v1beta1
metadata:
name: longhorn
driver: driver.longhorn.io
deletionPolicy: Retain
2. Repeat the steps from test scenario 1.
3. Delete the `VolumeSnapshot` using `kubectl delete volumesnapshots test-snapshot-pvc` | 1. The `VolumeSnapshot` should be deleted.
2. `VolumeSnapshotContent` should NOT be deleted.
3. Verify in the backup store, the backup should NOT be deleted. |
| 6 | Take a backup from longhorn of a snapshot created by csi snapshotter. | 1. Create a volume test-vol and write into it.
1. Compute the md5sum
2. Create the below `VolumeSnapshot` object
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
name: test-snapshot-pvc
spec:
volumeSnapshotClassName: longhorn
source:
persistentVolumeClaimName: test-vol
3. Go to longhorn UI and click on the snapshot created and take another backup | 1. On creating a `VolumeSnapshot`, a backup should be created in the backup store.
2. On creating another backup from longhorn UI, one more backup should be created in backup store. |
-| 7 | Delete the `csi plugin` while a backup is in progress. | 1. Create a volume and write into it.
Compute the md5sum of the data.
2. Create the below `VolumeSnapshot` object
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
name: test-snapshot-pvc
spec:
volumeSnapshotClassName: longhorn
source:
persistentVolumeClaimName: test-vol
3. While the backup is in progress, delete the `csi plugin` | On deleting `csi plugin` , a new pod of `csi plugin` should get created and the bacup should continue to complete. |
+| 7 | Delete the `csi plugin` while a backup is in progress. | 1. Create a volume and write into it.
Compute the md5sum of the data.
2. Create the below `VolumeSnapshot` object
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
name: test-snapshot-pvc
spec:
volumeSnapshotClassName: longhorn
source:
persistentVolumeClaimName: test-vol
3. While the backup is in progress, delete the `csi plugin` | On deleting the `csi plugin`, a new pod of the `csi plugin` should get created and the backup should continue to complete. |
| 8 | Take a backup using csi snapshotter with backup store as NFS server. | | |
| 9 | Restore from NFS backup store. | | |
| 10 | Delete from NFS backup store. | | |
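The `VolumeSnapshotClass` and `VolumeSnapshot` objects referenced in scenarios 5 through 7 above are flattened into the table cells. Reconstructed with normal indentation they would look like the sketch below; all field values are taken from the table, only the layout is added:
```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: longhorn
driver: driver.longhorn.io
deletionPolicy: Retain        # scenario 5; the default class uses Delete
---
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: test-snapshot-pvc
spec:
  volumeSnapshotClassName: longhorn
  source:
    persistentVolumeClaimName: test-vol
```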
diff --git a/docs/content/manual/functional-test-cases/kubernetes.md b/docs/content/manual/functional-test-cases/kubernetes.md
index 5f13237538..a4c603d61f 100644
--- a/docs/content/manual/functional-test-cases/kubernetes.md
+++ b/docs/content/manual/functional-test-cases/kubernetes.md
@@ -43,13 +43,13 @@ title: 5. Kubernetes
| 2 | Persistent Volume: Create a PV | **Pre condition:**
* Longhorn is deployed in the cluster
**Steps:**
1. Create a Volume in Longhorn UI `test-volume`
2. Go to cluster → Storage → Persistent Volumes
3. Click on Add PV
4. Select Volume Plugin **Longhorn**
5. Give in other required parameters including replica count.
6. Give in Volume Plugin - `test-volume` which an existing volume in longhorn
7. Click on **Save**.
8. Verify **test-1** PV is created
9. Go to Cluster → Project (default) → Workloads
10. Deploy a workload
11. In the Volumes section → Add a New Volume Claim → Use an existing persistent volume → Select **test-1** from PV dropdown.
12. Click on Define
13. Enter Mount Point.
14. Click on create workload
15. Verify workload is created successfully.
16. Volume gets attached to the pod in the workload
17. Navigate to Longhorn UI.
18. Verify user is able to view the volume attached to the workload in the UI
19. Navigate to volume details page of the volume and Verify the replica count mentioned in Step 4 is available | * Longhorn PV should be created
* Workload should be deployed with the volume mounted from the PV
* Verify volume is available on the Longhorn UI.
* Verify the replica count is as mentioned during storage class creation. |
| 3 | Create Storage class in Rancher; From Longhorn create volumes from this storage class. | **Pre condition:**
* Longhorn is deployed in the cluster
**Steps:**
1. Go to cluster → Storage → Storage Classes
2. Click on Add class
3. Select Provisioner **Longhorn**
4. Give in other required parameters including replica count.
5. Click on **Save**.
6. Verify **test-1** storage class is created
7. Go to Longhorn UI
8. In the Settings page for “Default Longhorn Static StorageClass Name”, give in the value: “test-1”
9. Go to Volumes page, click on create volume.
10. Create a volume name : v1
11. Verify v1 is created
12. using kubectl -
13. kubectl get pv -o yaml
14. Verify “storageClassName:” ---> test-1 | * Longhorn storage class should be created
* Value of Default Longhorn Static StorageClass Name should be changed in the settings page
* volume should be created in longhorn UI
* “storageClassName:” value should be **test-1** |
| 4 | Create Storage Class using backup URL | 1. Create volume and PV/PVC/POD in Longhorn
2. Write `test_data` into pod
3. Create a snapshot and back it up. Get the backup URL
4. Create a new StorageClass `longhorn-from-backup` in rancher and set backup URL.
5. Use `longhorn-from-backup` to create a new PVC
6. Wait for the volume to be created and complete the restoration.
7. Create the pod using the PVC. Verify the data | |
-| 5 | Create Storage class - by using different values for the input list of paramters | **Pre condition:**
* Longhorn is deployed in the cluster
**Steps:**
1. Go to cluster → Storage → Storage Classes
2. Click on Add class
3. Select Provisioner **Longhorn**
4. Give in other required parameters.
5. Click on **Save**.
6. Use this storage class to create a PVC and deploy in a workload.
7. Verify the parameters of the volume created. | Volume parameters should match the storage class paramaters. |
+| 5 | Create Storage class - by using different values for the input list of parameters | **Pre condition:**
* Longhorn is deployed in the cluster
**Steps:**
1. Go to cluster → Storage → Storage Classes
2. Click on Add class
3. Select Provisioner **Longhorn**
4. Give in other required parameters.
5. Click on **Save**.
6. Use this storage class to create a PVC and deploy in a workload.
7. Verify the parameters of the volume created. | Volume parameters should match the storage class parameters. |
| 6 | StorageClass with `reclaimPolicy` parameter set to `Delete` - PVC from storage class | **Pre conditions:**
* Create PVC from “Longhorn” storage class in rancher.
* It will have a dynamic PV bound
**Steps**:
1. 'Delete PVC from Rancher
2. Verify PVC is deleted
3. Verify PV bound to this PVC is deleted - Rancher → Cluster → Storage → PV
4. Verify the volume(Dynamic PV) in Longhorn is deleted | |
| 7 | Volume/PV/PVC created in Longhorn | **Pre conditions:**
* Create volume, PV, PVC in longhorn
**Steps:**
1. 'Delete PVC from Rancher
2. Verify PVC is deleted
3. PV will NOT. be deleted but be in “released” state in Rancher UI
4. Verify Volume does not get deleted | |
| 8 | StorageClass with `reclaimPolicy` parameter set to `Retain` - PVC from storage class | **Pre conditions:**
* Create PVC from “Longhorn” storage class in rancher.
* It will have a dynamic PV bound
**Steps**:
1. 'Delete PVC from Rancher
2. Verify PVC is deleted
3. Verify PV bound to this PVC is NOT deleted - Rancher → Cluster → Storage → PV
4. Verify the volume(Dynamic PV) in Longhorn is NOT deleted | |
| 9 | StorageClass with `reclaimPolicy` parameter set to `Retain` - Volume/PV/PVC created in Longhorn | **Pre conditions:**
* Create volume, PV, PVC in longhorn
**Steps:**
1. 'Delete PVC from Rancher
2. Verify PVC is deleted
3. PV will NOT. be deleted but be in “released” state in Rancher UI
4. Verify Volume does not get deleted | |
-| 10 | Power down node | 1. Power down
2. Replica migrates
3. Power back on
4. Verify if the replicas in the node have been deleted | * When a node is powered down, the replica is rebuilt on the 4th wrker node.
* When the node is powered back on, and the replica on the powered down node is not available in Longhorn UI anymore, there is no data in `/var/lib/longhorn/replicas` folder in the powered on node. |
-| 11 | Power down node with. Node tag/disk tag | 1. Add a node tag/disk tag
2. Power down
3. Replica cannot migrate
4. Power back on
5. Replica should get rebuilt on this node | * When a node is powered down, the replica is rebuilt on the 4th wrker node.
* When the node is powered back on, and the replica on the powered down node is not available in Longhorn UI anymore, there is no data in `/var/lib/longhorn/replicas` folder in the powered on node.
* The new replica is rebuilt on a node which has a tag. |
+| 10 | Power down node | 1. Power down
2. Replica migrates
3. Power back on
4. Verify if the replicas in the node have been deleted | * When a node is powered down, the replica is rebuilt on the 4th worker node.
* When the node is powered back on, and the replica on the powered down node is not available in Longhorn UI anymore, there is no data in `/var/lib/longhorn/replicas` folder in the powered on node. |
+| 11 | Power down node with Node tag/disk tag | 1. Add a node tag/disk tag
2. Power down
3. Replica cannot migrate
4. Power back on
5. Replica should get rebuilt on this node | * When a node is powered down, the replica is rebuilt on the 4th worker node.
* When the node is powered back on, and the replica on the powered down node is not available in Longhorn UI anymore, there is no data in `/var/lib/longhorn/replicas` folder in the powered on node.
* The new replica is rebuilt on a node which has a tag. |
| 12 | Drain a node | 1. Drain use case — drain a worker node
2. Check if the State of the node reflects in the Longhorn UI —> Node
3. Verify if replica is rebuilt on another node?
4. Verify if the pod migrates
5. And the volume get migrated | All the components should be successfully drained. |
| 13 | kubectl - force drain | Using kubectl - force drain a node where the pod with the volume attached is available
Have snapshots before
Verify data after pod migrates | Volume attaches on the new pod
2 of the 3 replicas are in “Stopped” state - Caused replica rebuild. |
| 14 | Cordon a node | 1. Cordon state - cordon a worker node | |
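Scenarios 6 and 8 above differ only in the StorageClass `reclaimPolicy`. A minimal sketch of such a class is shown below; the provisioner name and parameter keys are assumptions based on the standard Longhorn StorageClass, not values taken from the table:
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: test-1
provisioner: driver.longhorn.io   # assumed Longhorn CSI provisioner name
reclaimPolicy: Retain             # Delete in scenario 6, Retain in scenario 8
parameters:
  numberOfReplicas: "3"           # replica count entered during class creation
  staleReplicaTimeout: "30"
```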
diff --git a/docs/content/manual/functional-test-cases/monitoring.md b/docs/content/manual/functional-test-cases/monitoring.md
index ef0bcd4dce..1fd2b42146 100644
--- a/docs/content/manual/functional-test-cases/monitoring.md
+++ b/docs/content/manual/functional-test-cases/monitoring.md
@@ -157,8 +157,8 @@ spec:
| 6 | longhorn\_instance\_manager\_cpu\_usage\_millicpu | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create a volume and attach it to a pod.2. Write 1 Gi data into it.3. Set multiple recurring backup on the volume.4. Go to Prometheus web UI.5. Select `longhorn_instance_manager_cpu_usage_millicpu` and execute. | 1. The reading of cpu\_usage should be shown correctly2. The reading of other instance managers should not get impacted. |
| 7 | longhorn\_instance\_manager\_memory\_requests\_bytes | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create a volume and attach it to a pod.2. Write 1 Gi data into it.3. Set multiple recurring backup on the volume.4. Go to Prometheus web UI.5. Select `longhorn_instance_manager_memory_requests_bytes` and execute. | 1. The reading of memory\_requests should go up for the attached instance manager.2. The reading of other instance managers should not get impacted. |
| 8 | longhorn\_instance\_manager\_memory\_usage\_bytes | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create a volume and attach it to a pod.2. Write 1 Gi data into it.3. Set multiple recurring backup on the volume.4. Go to Prometheus web UI.5. Select `longhorn_instance_manager_memory_usage_bytes` and execute. | 1. The reading of memory\_usage should go up for the attached instance manager.2. The reading of other instance managers should not get impacted. |
-| 9 | longhorn\_manager\_cpu\_usage\_millicpu | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create 3 volumes of different sizes.2. Attach 1st volume to a pod and write 1 Gi data into it.3. Leave the 2rd volume to the detached state.4. Attach the 3th volume to pod and write 1.5 Gi data into it. Attach the volume in maintenance mode.5. Set a recurring backup on volume 1st.6. Perform revert to snapshot with 3rd volume.7. Go to Prometheus web UI.8. Select `longhorn_manager_cpu_usage_millicpu` and execute. | 1. Monitor the graph and the console on the Prometheus server, the cpu\_usage should go up. |
-| 10 | longhorn\_manager\_memory\_usage\_bytes | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create 3 volumes of different sizes.2. Attach 1st volume to a pod and write 1 Gi data into it.3. Leave the 2rd volume to the detached state.4. Attach the 3th volume to pod and write 1.5 Gi data into it. Attach the volume in maintenance mode.5. Set a recurring backup on volume 1st.6. Perform revert to snapshot with 3rd volume.7. Try to make disk full of a node where `longhorn-manager` is running.8. Go to Prometheus web UI.9. Select `longhorn_manager_memory_usage_bytes` and execute. | 1. Monitor the graph and the console on the Prometheus server, the memory\_usage should go up. |
+| 9 | longhorn\_manager\_cpu\_usage\_millicpu | **Pre-requisite:**
1. Prometheus setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create 3 volumes of different sizes.2. Attach the 1st volume to a pod and write 1 Gi data into it.3. Leave the 2nd volume in the detached state.4. Attach the 3rd volume to a pod and write 1.5 Gi data into it. Attach the volume in maintenance mode.5. Set a recurring backup on the 1st volume.6. Perform revert to snapshot with the 3rd volume.7. Go to Prometheus web UI.8. Select `longhorn_manager_cpu_usage_millicpu` and execute. | 1. Monitor the graph and the console on the Prometheus server, the cpu\_usage should go up. |
+| 10 | longhorn\_manager\_memory\_usage\_bytes | **Pre-requisite:**
1. Prometheus setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create 3 volumes of different sizes.2. Attach the 1st volume to a pod and write 1 Gi data into it.3. Leave the 2nd volume in the detached state.4. Attach the 3rd volume to a pod and write 1.5 Gi data into it. Attach the volume in maintenance mode.5. Set a recurring backup on the 1st volume.6. Perform revert to snapshot with the 3rd volume.7. Try to make the disk full on a node where `longhorn-manager` is running.8. Go to Prometheus web UI.9. Select `longhorn_manager_memory_usage_bytes` and execute. | 1. Monitor the graph and the console on the Prometheus server, the memory\_usage should go up. |
| 11 | longhorn\_disk\_capacity\_bytes | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create volumes and attach them to each node.2. Add an additional disk to all the nodes. (Different size)3. Write into the volumes.4. Power down a node.5. Disable a node.6. Add a new node in the cluster.7. Delete a node from the cluster.8. Go to Prometheus web UI.9. Select `longhorn_disk_capacity_bytes` and execute. | 1. All the disks should be identified by Prometheus.2. All the disks should show the correct total size of the disks. |
| 12 | longhorn\_disk\_usage\_bytes | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create volumes and attach them to each node.2. Add an additional disk to all the nodes. (Different size)3. Write into the volumes.4. Power down a node.5. Disable a node.6. Add a new node in the cluster.7. Delete a node from the cluster.8. Go to Prometheus web UI.9. Select `longhorn_disk_usage_bytes` and execute. | 1. All the disks should be identified by Prometheus.2. All the disks should show the occupied size of the disks. |
| 13 | longhorn\_node\_capacity\_bytes | **Pre-requisite:**
1. Prometheus is setup is done and Prometheus web UI is accessible.
**Test Steps:**
1. Create volumes and attach them to each node.2. Add an additional disk to all the nodes. (Different size)3. Write into the volumes.4. Power down a node.5. Disable a node.6. Add a new node in the cluster.7. Delete a node from the cluster.8. Go to Prometheus web UI.9. Select `longhorn_node_capacity_bytes` and execute. | 1. All the nodes should be identified by Prometheus.2. All the nodes should show the total capacity available of disks available. |
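Every metric case above starts from the "Prometheus setup is done" prerequisite. One common way to satisfy it is a `ServiceMonitor` that scrapes the longhorn-manager pods; the sketch below follows the Longhorn monitoring documentation and is an assumption, not part of these test cases:
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: longhorn-prometheus-servicemonitor
  namespace: longhorn-system
  labels:
    name: longhorn-prometheus-servicemonitor
spec:
  selector:
    matchLabels:
      app: longhorn-manager       # scrape the longhorn-manager pods that expose the metrics
  namespaceSelector:
    matchNames:
      - longhorn-system
  endpoints:
    - port: manager
```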
diff --git a/docs/content/manual/functional-test-cases/node.md b/docs/content/manual/functional-test-cases/node.md
index 645c7467d1..1129ea6a7e 100644
--- a/docs/content/manual/functional-test-cases/node.md
+++ b/docs/content/manual/functional-test-cases/node.md
@@ -24,7 +24,7 @@ Test cases
| | **Test Case** | **Test Instructions** | **Expected Results** |
| --- | --- | --- | --- |
| 1 | Node scheduling | * **Prerequisites:**
* Longhorn Deployed with 3 nodes
1. Disable Node Scheduling on a node
2. Create a volume with 3 replicas, and attach it to a node
3. Re-enabled node scheduling on the node | * Volume should be created and attached
* Volume replicas should be scheduled to Schedulable nodes only
* Re-enabling node scheduling will not affect existing scheduled replicas, it will only affect new replicas being created, or rebuilt. | |
-| 2 | Disk Scheduling | * **Prerequisites:**
* Longhorn Deployed with 3 nodes
* Add additional disk (Disk#1) ,attach it and mounted to Node-01.
1. Create a New Disk, Keep Disk Scheduling disabled
2. Create a volume (vol#1), set replica count to `4` and attach it to a node
3. Check (vol#1) replica paths
4. Enable Scheduling on (disk#1)
5. Create a volume (vol#2), set replica count to `4` and attach it to a node
6. Check (vol#2) replica paths | * (vol#1) replicas should be scheduled only to Disks withe Scheduling enabled, no replicas should be scheduled to (disk#1)
* One of (vol#2) replica paths will be scheduled to (disk#1) | Pass
Case of vol#2 - Not necessarily replica will exists on disk#1 provided soft anti affinity is enabled. It might scheduled on disk#1 |
+| 2 | Disk Scheduling | * **Prerequisites:**
* Longhorn Deployed with 3 nodes
* Add an additional disk (Disk#1), attach it and mount it to Node-01.
1. Create a New Disk, Keep Disk Scheduling disabled
2. Create a volume (vol#1), set replica count to `4` and attach it to a node
3. Check (vol#1) replica paths
4. Enable Scheduling on (disk#1)
5. Create a volume (vol#2), set replica count to `4` and attach it to a node
6. Check (vol#2) replica paths | * (vol#1) replicas should be scheduled only to Disks with Scheduling enabled, no replicas should be scheduled to (disk#1)
* One of (vol#2) replica paths will be scheduled to (disk#1) | Pass
Case of vol#2 - A replica will not necessarily exist on disk#1 provided soft anti-affinity is enabled. It might be scheduled on disk#1 |
| 3 | Volume Created with Node Tags | * **Prerequisites:**
* Longhorn Deployed with 3 nodes
1. Create Node tags as follows:
1. Node-01: fast
2. Node-02: slow
3. Node-02: fast
2. Create a volume (vol#1), set Node tags to slow
3. Create a volume (vol#2), set Node tags to fast
4. Check Volumes replicas paths
5. Check Volume detail `Node Tags` | * vol#1 replicas should only be scheduled to Node-02
* vol#2 replicas should only be scheduled to Node-01 and Node-03
* Node Tag volume detail should contain Node tag specified in volume creation request. |
| 4 | Volumes created with Disk Tags | * **Prerequisites:**
* Longhorn Deployed with 3 nodes, with default disks (disk#01-1, disk#02-1, disk#03-1)
* `disk#0X-Y` indicate that disk is attached to `Node-0X` , and it is disk number `Y` on that node.
* Create 3 additional disks (disk#01-2, disk#02-2, disk#03-2), attach each one to a different node, and mount it to a directory on that node.
1. Create Disk tags as follows:
1. disk#01-1: fast
2. disk#01-2: fast
3. disk#02-1: slow
4. disk#02-2: slow
5. disk#03-1: fast
6. disk#01-2: fast
2. Create a volume (vol#1), set Disk tags to slow
3. Create a volume (vol#2), set Disk tags to fast
4. Check Volumes replicas paths
5. Check Volume detail `Disk Tags` | * vol#1 replicas should only be scheduled to disks have slow tag (disk#02-1 and disk#02-2)
* vol#2 replicas should can be scheduled to disks have fast Tag
(disk#01-1, disk#01-2, disk#03-1, disk#03-2)
* Disk Tag volume detail should contain Disk tag specified in volume creation request. |
| 5 | Volumes created with both DIsk and Node Tags | * Create a volume, set Disk and node tags, and attach it to a node | * Volume replicas should be scheduled only to node that have Node tags, and only on disks that have Disk tags specified on volume creation request
* If No Node match both Node and Disk tags, volume replicas will not be created. |
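Scenarios 3 and 4 above create volumes with Node and Disk tags through the UI; the same placement constraint can also be expressed in a StorageClass. The sketch below uses the Longhorn `nodeSelector`/`diskSelector` parameters and is an illustrative assumption, not a step from the table:
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-fast
provisioner: driver.longhorn.io   # assumed Longhorn CSI provisioner name
parameters:
  numberOfReplicas: "3"
  nodeSelector: "fast"            # replicas only on nodes tagged "fast"
  diskSelector: "fast"            # replicas only on disks tagged "fast"
```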
diff --git a/docs/content/manual/functional-test-cases/volume.md b/docs/content/manual/functional-test-cases/volume.md
index 9692941fd9..c9ccefb14f 100644
--- a/docs/content/manual/functional-test-cases/volume.md
+++ b/docs/content/manual/functional-test-cases/volume.md
@@ -12,7 +12,7 @@ title: 3. Volume
| 5 | Attach multiple volumes in maintenance mode | * **Prerequisite:**
* Create multiple volumes
1. Select multiple volumes and Attach them to a node in maintenance mode | * All Volumes should be attached in maintenance mode to the same node specified in volume attach request. |
| 6 | Detach multiple volumes | * **Prerequisite:**
* Multiple attached volumes
* Select multiple volumes and detach | * Volumes should be detached |
| 7 | Backup multiple Volumes | * **Prerequisite:**
* Longhorn should be configured to point to a backupstore
* Multiple volumes existed and attached to node/used buy kubernetes workload
* Write some data to multiple volumes and compute it’s checksum
* Select multiple volumes and Create a backup
* restore volumes backups and check its data checksum | * Volume backups should be created
* Restored volumes from backup should contain the same data when backup is created |
-| 8 | Create PV/PVC for multiple volumes | **Prerequisite:**
* Create multiple volumes
1. Select multiple volumes
2. Create a PV, specify filesysem
3. Check PV in Lonhgorn UI and in Kubernetes
4. Create PVC
5. Check PVC in Lonhgorn UI and in Kubernetes
6. Delete PVC
7. Check PV in Lonhgorn UI and in Kubernetes | * For all selected volumes
* PV should created
* PV/PVC status in UI should be `Available`
* PV `spec.csi.fsType` should match filesystem specified in PV creation request
* PV `spec.storageClassName` should match the setting in `Default Longhorn Static StorageClass Name`
* PV `spec.csi.volumeHandle` should be the volume name
* PV/PVC status in UI should be `Bound` in Longhorn UI
* PVC namespace should match namespace specified in PVC creation request
* After Deleting PVC, PV/PVC status should be `Relased` in Longhorn UI. |
+| 8 | Create PV/PVC for multiple volumes | **Prerequisite:**
* Create multiple volumes
1. Select multiple volumes
2. Create a PV, specify filesystem
3. Check PV in Longhorn UI and in Kubernetes
4. Create PVC
5. Check PVC in Longhorn UI and in Kubernetes
6. Delete PVC
7. Check PV in Longhorn UI and in Kubernetes | * For all selected volumes
* PV should be created
* PV/PVC status in UI should be `Available`
* PV `spec.csi.fsType` should match filesystem specified in PV creation request
* PV `spec.storageClassName` should match the setting in `Default Longhorn Static StorageClass Name`
* PV `spec.csi.volumeHandle` should be the volume name
* PV/PVC status in UI should be `Bound` in Longhorn UI
* PVC namespace should match namespace specified in PVC creation request
* After Deleting PVC, PV/PVC status should be `Released` in Longhorn UI. |
| 9 | Volume expansion | Check Multiple Volume expansion test cases work for multiple volumes
[Test Cases in Volume Details page](https://rancher.atlassian.net/wiki/spaces/LON/pages/354453117/Volume+detail+page) | Volume expansion should work for multiple volumes. |
| 10 | Engine Offline Upgrade For Multiple Volumes | **Prerequisite:**
* Volume is consumed by Kubernetes deployment workload
* Volume use old Longhorn Engine
1. Write data to volume, compute it’s checksum (checksum#1)
2. Scale down deployment , volume gets detached
3. Upgrade Longhorn engine image to use new deployed engine image
4. Scale up deployment, volume gets attached | * Volume read/write operations should work before and after engine upgrade.
* Old Engine `Reference Count` will be decreased by 1
* New Engine `Reference Count` will be increased by 1 |
| 12 | Show System Hidden | **Prerequisite**:
* Volume is created and attached to a pod.
1. Click the volume appearing on volume list page, it takes user to volume.
2. Take snapshot and upgrade the replicas.
3. Under snapshot section, enable option 'Show System Hidden | Enabling this option will show system created snapshots while rebuilding of replicas. |
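Scenario 8 above checks several fields of the PVs that Longhorn generates. For reference, a sketch of such a statically created PV is shown below; the field values (size, filesystem, StorageClass name) are illustrative assumptions, while the checked fields mirror the expectations in the table:
```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: test-volume
spec:
  storageClassName: longhorn-static   # should match the Default Longhorn Static StorageClass Name setting
  capacity:
    storage: 2Gi                      # assumed volume size
  accessModes:
    - ReadWriteOnce
  csi:
    driver: driver.longhorn.io
    fsType: ext4                      # filesystem specified in the PV creation request
    volumeHandle: test-volume         # must equal the Longhorn volume name
```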
diff --git a/docs/content/manual/pre-release/cluster-restore/restore-to-an-old-cluster.md b/docs/content/manual/pre-release/cluster-restore/restore-to-an-old-cluster.md
index 10fb2f80cf..838522dd97 100644
--- a/docs/content/manual/pre-release/cluster-restore/restore-to-an-old-cluster.md
+++ b/docs/content/manual/pre-release/cluster-restore/restore-to-an-old-cluster.md
@@ -35,15 +35,15 @@ This test may need to be validated for both kind of cluster.
4. Deploy a StatefulSet with volume D. Write some data and do some snapshot operations. (Validate 2 cases: <1> volume can be recovered automatically if some replicas are removed and some new replicas are replenished; <2> snapshot info will be resynced;)
5. Deploy a Deployment with volume E. Write some data and do some snapshot operations. (Validate 4 cases: <1> engine upgrade; <2> offline expansion)
3. Create a cluster snapshot via Rancher.
-4. Do the followings before the restore:
+4. Do the following before the restore:
1. Delete volume A.
2. Write more data to volume B and create more backups.
3. Remove all current replicas one by one for volume C. Then all replicas of volume C are new replicas.
4. Remove some replicas for volume D. Do snapshot creation, deletion, and revert.
5. Scale down the workload. Upgrade volume E from the default image to another engine image. And do expansion.
- 6. Create and attach volume F via UI. Write some data and do some snapshot operations. (Validate 1 case: Users need to manuall recover the volume if it's created after the cluster snapshot)
+ 6. Create and attach volume F via UI. Write some data and do some snapshot operations. (Validate 1 case: Users need to manually recover the volume if it's created after the cluster snapshot)
5. Restore the cluster.
-6. Check the followings according to the doc:
+6. Check the following according to the doc:
1. Volume A is back. But there is no data in it. And users can re-delete it.
2. Volume B can be reattached or keep attached with correct data. The backup info of volume B is resynced when the volume is reattahed. The pod can use the volume after restart.
3. All old removed replicas are back and all newly rebuilt replicas in step4-3 disappear for volume C. There is no data in volume C. The data directories of the disappeared replicas are still on the node. Hence the data are be recovered by exporting a single replica volume.
diff --git a/docs/content/manual/pre-release/node-not-ready/node-down/single-replica-node-down.md b/docs/content/manual/pre-release/node-not-ready/node-down/single-replica-node-down.md
index 9bc72860b6..53a3c0fb27 100644
--- a/docs/content/manual/pre-release/node-not-ready/node-down/single-replica-node-down.md
+++ b/docs/content/manual/pre-release/node-not-ready/node-down/single-replica-node-down.md
@@ -20,7 +20,7 @@ https://github.com/longhorn/longhorn/issues/3957
6. Power up node or delete the workload pod so that kubernetes will recreate pod on another node.
7. Verify auto salvage finishes (i.e pod completes start).
8. Verify volume attached & accessible by pod (i.e test data is available).
- - For data locality = strict-local volume, volume wiil keep in detaching, attaching status for about 10 minutes, after volume attached to node which replica located, check volume healthy and pod status.
+ - For data locality = strict-local volume, the volume will keep switching between detaching and attaching status for about 10 minutes; after the volume is attached to the node where its replica is located, check the volume health and pod status.
## Node restart/down scenario with `Pod Deletion Policy When Node is Down` set to `delete-both-statefulset-and-deployment-pod`
1. Create RWO|RWX volume with replica count = 1 & data locality = enabled|disabled|strict-local.
diff --git a/docs/content/manual/pre-release/node/degraded-availability.md b/docs/content/manual/pre-release/node/degraded-availability.md
index 26f2b17a43..fedbc8b3c1 100644
--- a/docs/content/manual/pre-release/node/degraded-availability.md
+++ b/docs/content/manual/pre-release/node/degraded-availability.md
@@ -15,8 +15,8 @@ title: Degraded availability with added nodes
##### Steps:
1. Create a Deployment Pod with a volume and three replicas.
1. After the volume is attached, on Volume page it should be displayed as `Degraded`
- 1. Hover the crusor to the red circle exclamation mark, the tooltip will says, "The volume cannot be scheduled".
- 1. Click into the volume detail page it will display `Scheduling Failure` but the volume remain fuctional as expected.
+ 1. Hover the cursor over the red circle exclamation mark; the tooltip will say, "The volume cannot be scheduled".
+ 1. Click into the volume detail page; it will display `Scheduling Failure` but the volume remains functional as expected.
1. Write data to the Pod.
1. Scale down the deployment to 0 to detach the volume.
1. Volume return to `Detached` state.
diff --git a/docs/content/manual/pre-release/upgrade/backing-image-during-upgrade.md b/docs/content/manual/pre-release/upgrade/backing-image-during-upgrade.md
index ec44abb402..9ca7dc0132 100644
--- a/docs/content/manual/pre-release/upgrade/backing-image-during-upgrade.md
+++ b/docs/content/manual/pre-release/upgrade/backing-image-during-upgrade.md
@@ -38,7 +38,7 @@ title: Test Backing Image during Longhorn upgrade
1. Deploy Longhorn.
2. Create a backing images. Wait for the backing image being ready in the 1st disk.
3. Create and attach volumes with the backing image. Wait for all disk files of the backing image being ready.
-4. Run `kubectl -n longhorn system get pod -w` in a seperate session.
+4. Run `kubectl -n longhorn-system get pod -w` in a separate session.
5. Upgrade Longhorn manager but with the backing image manager image unchanged. (Actually we can mock this upgrade by removing all longhorn manager pods simultaneously.)
6. Check if all disk file status of the backing image becomes `unknown` then `ready` during the longhorn manager pods termination and restart. (May need to refresh the UI page after restart.)
7. After the longhorn manager pods restart, Verify there is no backing image data source pod launched for the backing image in the output of step4.
diff --git a/docs/content/manual/release-specific/v1.2.0/label-driven-recurring-job.md b/docs/content/manual/release-specific/v1.2.0/label-driven-recurring-job.md
index 30f6134599..6f448fd7e8 100644
--- a/docs/content/manual/release-specific/v1.2.0/label-driven-recurring-job.md
+++ b/docs/content/manual/release-specific/v1.2.0/label-driven-recurring-job.md
@@ -15,11 +15,11 @@ https://github.com/longhorn/longhorn/issues/467
*And* create volume `test-job-4`.
*And* create volume `test-job-5`.
-**Then** moniter the cron job pod log.
+**Then** monitor the cron job pod log.
*And* should see 2 jobs created concurrently.
**When** update `snapshot1` recurring job with `concurrency` set to `3`.
-**Then** moniter the cron job pod log.
+**Then** monitor the cron job pod log.
*And* should see 3 jobs created concurrently.
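For reference, the `snapshot1` job above can also be represented as a `RecurringJob` custom resource; the sketch below is an assumption of how it might look for this test, with only the job name and `concurrency` values taken from the scenario (cron schedule, retain count, and groups are illustrative):
```yaml
apiVersion: longhorn.io/v1beta1
kind: RecurringJob
metadata:
  name: snapshot1
  namespace: longhorn-system
spec:
  task: snapshot
  cron: "*/1 * * * *"   # assumed schedule so jobs trigger quickly during the test
  retain: 2             # assumed retain count
  concurrency: 2        # raised to 3 in the second half of the test
  groups: []
  labels: {}
```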
diff --git a/docs/content/manual/release-specific/v1.2.0/test-backing-image-upload.md b/docs/content/manual/release-specific/v1.2.0/test-backing-image-upload.md
index e42178b19f..5b8740893c 100644
--- a/docs/content/manual/release-specific/v1.2.0/test-backing-image-upload.md
+++ b/docs/content/manual/release-specific/v1.2.0/test-backing-image-upload.md
@@ -37,7 +37,7 @@ title: Test backing image
1. Create a valid backing image
2. Create a StorageClass, which use the same backing image name but different data source type/parameters.
3. Create a PVC with the StorageClass.
- ==> The corresponding creation should fail. The longhorn-csi-plugin will repeatly print out error logs like this `existing backing image %v data source is different from the parameters in the creation request or StorageClass`.
+ ==> The corresponding creation should fail. The longhorn-csi-plugin will repeatedly print out error logs like this `existing backing image %v data source is different from the parameters in the creation request or StorageClass`.
4. Delete the PVC and the StorageClass.
5. Recreate a StorageClass in which the backing image fields match the existing backing image.
6. Create a PVC with the StorageClass.
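Steps 2 and 5 above compare the backing image fields of a StorageClass against an existing backing image. A sketch of such a StorageClass follows; the backing image name, URL, and parameter keys are assumptions based on the Longhorn StorageClass parameters for backing images, not values from this document:
```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn-backing-image
provisioner: driver.longhorn.io
parameters:
  numberOfReplicas: "3"
  backingImage: bi-test                       # assumed backing image name
  backingImageDataSourceType: download        # must match the existing backing image's data source type
  backingImageDataSourceParameters: '{"url": "https://example.com/image.qcow2"}'  # placeholder URL
```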
diff --git a/docs/content/manual/release-specific/v1.2.3/test-backing-image-checksum-mismatching.md b/docs/content/manual/release-specific/v1.2.3/test-backing-image-checksum-mismatching.md
index fe096f8e5c..89f015c376 100644
--- a/docs/content/manual/release-specific/v1.2.3/test-backing-image-checksum-mismatching.md
+++ b/docs/content/manual/release-specific/v1.2.3/test-backing-image-checksum-mismatching.md
@@ -3,7 +3,7 @@ title: Test backing image checksum mismatching
---
### Test step
-1. Modify setting `Backing Image Recovery Wait Interval` to a shorter value so that the backing image will start auto recovery eariler.
+1. Modify setting `Backing Image Recovery Wait Interval` to a shorter value so that the backing image will start auto recovery earlier.
2. Create a backing image file with type `Download From URL`.
3. Launch a volume using the backing image file so that there are 2 disk records for the backing image.
4. Modify one disk file for the backing image and make sure the file size is not changed. This will lead to data inconsistency/corruption later. e.g.,
diff --git a/docs/content/manual/release-specific/v1.3.0/extend_CSI_snapshot_support.md b/docs/content/manual/release-specific/v1.3.0/extend_CSI_snapshot_support.md
index d096cdd538..d70c46ecdc 100644
--- a/docs/content/manual/release-specific/v1.3.0/extend_CSI_snapshot_support.md
+++ b/docs/content/manual/release-specific/v1.3.0/extend_CSI_snapshot_support.md
@@ -132,7 +132,7 @@ https://github.com/longhorn/longhorn/issues/2534
* Scale down the workload to detach the `test-vol`
* Create the same PVC `test-restore-pvc` as in the `Source volume is attached && Longhorn snapshot exist` section
* Verify that PVC provisioning failed because the source volume is detached so Longhorn cannot verify the existence of the Longhorn snapshot in the source volume.
- * Scale up the workload to attache `test-vol`
+ * Scale up the workload to attach `test-vol`
* Wait for PVC to finish provisioning and be bounded
* Attach the PVC `test-restore-pvc` and verify the data
* Delete the PVC
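For reference, the `test-restore-pvc` used above provisions from a `VolumeSnapshot` data source; a minimal sketch is shown below. The snapshot name, StorageClass name, and size are assumptions, since this hunk does not show them:
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-restore-pvc
spec:
  storageClassName: longhorn        # assumed StorageClass name
  dataSource:
    name: test-snapshot             # assumed VolumeSnapshot name, not shown in this hunk
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi                  # assumed size; must be at least the source volume size
```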
diff --git a/docs/content/manual/release-specific/v1.6.0/test-engine-version-enforcement.md b/docs/content/manual/release-specific/v1.6.0/test-engine-version-enforcement.md
index ba4f32d956..0d2b543e78 100644
--- a/docs/content/manual/release-specific/v1.6.0/test-engine-version-enforcement.md
+++ b/docs/content/manual/release-specific/v1.6.0/test-engine-version-enforcement.md
@@ -22,7 +22,7 @@ longhorn-manager-grhsf 0/1 CrashLoopBackOff
```
And should see incompatible version error in longhorn-manager Pod logs
```
-time="2023-08-17T03:03:20Z" level=fatal msg="Error starting manager: failed checking Engine upgarde path: incompatible Engine ei-7fa7c208 client API version: found version 7 is below required minimal version 8"
+time="2023-08-17T03:03:20Z" level=fatal msg="Error starting manager: failed checking Engine upgrade path: incompatible Engine ei-7fa7c208 client API version: found version 7 is below required minimal version 8"
```
**When** downgraded Longhorn to v1.5.x
@@ -39,5 +39,5 @@ ei-7fa7c208 true deployed longhornio/longhorn-engine:v1.4.1 0
ei-ad420081 false deployed c3y1huang/research:2017-lh-ei 0 44h 24s
```
-**When** update existing volume/engine/replica custom resourcs `spec.image` with `longhornio/longhorn-engine:v1.4.x`
+**When** update existing volume/engine/replica custom resources `spec.image` with `longhornio/longhorn-engine:v1.4.x`
**Then** should be blocked
diff --git a/docs/content/manual/release-specific/v1.6.0/test-rebuild-in-meta-blocks-engine-start.md b/docs/content/manual/release-specific/v1.6.0/test-rebuild-in-meta-blocks-engine-start.md
index f81a56c604..a1cfaed7e0 100644
--- a/docs/content/manual/release-specific/v1.6.0/test-rebuild-in-meta-blocks-engine-start.md
+++ b/docs/content/manual/release-specific/v1.6.0/test-rebuild-in-meta-blocks-engine-start.md
@@ -32,7 +32,7 @@ index b48ddd46..c4523f11 100644
**And** the `auto-salvage` setting is set to `true`.
**And** a new StorageClass is created with `NumberOfReplica` set to `1`.
**And** a StatefulSet is created with `Replica` set to `1`.
-**And** the node of the StatefulSet Pod and the node of its volume Replica are different. This is necessary to trigger the rebuilding in reponse to the data locality setting update later.
+**And** the node of the StatefulSet Pod and the node of its volume Replica are different. This is necessary to trigger the rebuilding in response to the data locality setting update later.
**And** Volume have 1 running Replica.
**And** data exists in the volume.
diff --git a/engine/environment-setup/setupRancher.py b/engine/environment-setup/setupRancher.py
index 8882c14343..4c49b28428 100644
--- a/engine/environment-setup/setupRancher.py
+++ b/engine/environment-setup/setupRancher.py
@@ -32,7 +32,7 @@ def silent_remove_file(filename):
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
- raise # re-raise exception if a different error occured
+ raise # re-raise exception if a different error occurred
def gce_create_instance(compute, name, gce_startup_script):
diff --git a/manager/integration/tests/common.py b/manager/integration/tests/common.py
index 28503ae13e..cedabb6155 100644
--- a/manager/integration/tests/common.py
+++ b/manager/integration/tests/common.py
@@ -2287,7 +2287,7 @@ class AssertErrorCheckThread(threading.Thread):
Parameters:
target : The threading function.
- args : Arguments of the target fucntion.
+ args : Arguments of the target function.
"""
def __init__(self, target, args):
threading.Thread.__init__(self)
@@ -5758,7 +5758,7 @@ def generate_support_bundle(case_name): # NOQA
Generate support bundle into folder ./support_bundle/case_name.zip
Won't generate support bundle if current support bundle count
- greate than MAX_SUPPORT_BINDLE_NUMBER.
+ greater than MAX_SUPPORT_BINDLE_NUMBER.
Args:
case_name: support bundle will named case_name.zip
"""
@@ -5808,7 +5808,7 @@ def generate_support_bundle(case_name): # NOQA
with open('./support_bundle/{0}.zip'.format(case_name), 'wb') as f:
f.write(r.content)
except Exception as e:
- warnings.warn("Error occured while downloading support bundle {}.zip\n\
+ warnings.warn("Error occurred when downloading support bundle {}.zip\n\
The error was {}".format(case_name, e))
diff --git a/manager/integration/tests/test_backing_image.py b/manager/integration/tests/test_backing_image.py
index 5fac7c272b..118fbfccbe 100644
--- a/manager/integration/tests/test_backing_image.py
+++ b/manager/integration/tests/test_backing_image.py
@@ -431,7 +431,7 @@ def test_backing_image_with_disk_migration(): # NOQA
`-` is removed.
9. Remount the host disk to another path. Then create another Longhorn disk
based on the migrated path (disk migration).
- 10. Verify the followings.
+ 10. Verify the following.
1. The disk added in step3 (before the migration) should
be "unschedulable".
2. The disk added in step9 (after the migration) should
diff --git a/manager/integration/tests/test_basic.py b/manager/integration/tests/test_basic.py
index bedd863e03..22596c6e58 100644
--- a/manager/integration/tests/test_basic.py
+++ b/manager/integration/tests/test_basic.py
@@ -342,7 +342,7 @@ def test_volume_iscsi_basic(client, volume_name): # NOQA
1. Create and attach a volume with iscsi frontend
2. Check the volume endpoint and connect it using the iscsi
- initator on the node.
+ initiator on the node.
3. Write then read back volume data for validation
"""
@@ -3421,7 +3421,7 @@ def test_allow_volume_creation_with_degraded_availability(client, volume_name):
2. `node-level-soft-anti-affinity` to false.
Steps:
- (degraded availablity)
+ (degraded availability)
1. Disable scheduling for node 2 and 3.
2. Create a volume with three replicas.
1. Volume should be `ready` after creation and `Scheduled` is true.
diff --git a/manager/integration/tests/test_csi_snapshotter.py b/manager/integration/tests/test_csi_snapshotter.py
index b1bf905f26..05493e5ce9 100644
--- a/manager/integration/tests/test_csi_snapshotter.py
+++ b/manager/integration/tests/test_csi_snapshotter.py
@@ -435,7 +435,7 @@ def csi_volumesnapshot_creation_test(snapshotClass=longhorn|custom):
4. check creation of a new longhorn snapshot named `snapshot-uuid`
5. check for `VolumeSnapshotContent` named `snapcontent-uuid`
6. wait for `VolumeSnapshotContent.readyToUse` flag to be set to **true**
- 7. check for backup existance on the backupstore
+ 7. check for backup existence on the backupstore
# the csi snapshot restore sets the fromBackup field same as
# the StorageClass based restore approach.
@@ -860,16 +860,16 @@ def test_csi_snapshot_snap_create_volume_from_snapshot(apps_api, # NOQA
- Attach the PVC and verify data
- Source volume is detached
- Scale down the workload
- - Create PVC from VolumeSnapshot generated from step beggining
+ - Create PVC from VolumeSnapshot generated from step beginning
- Verify PVC provision failed
- Scale up the workload
- Wait for PVC to finish provisioning and be bounded
- Attach the PVC test-restore-pvc and verify the data
- Source volume is attached && Longhorn snapshot doesn’t exist
- Use VolumeSnapshotContent.snapshotHandle to
- specify Longhorn snapshot generated in step beggining
+ specify Longhorn snapshot generated in step beginning
- Delete the Longhorn snapshot
- - Create PVC from VolumeSnapshot generated from step beggining
+ - Create PVC from VolumeSnapshot generated from step beginning
- PVC should be stuck in provisioning state
"""
vol, deployment, csisnapclass, expected_md5sum = \
diff --git a/manager/integration/tests/test_engine_upgrade.py b/manager/integration/tests/test_engine_upgrade.py
index 2c16d6a8c1..310b31e20e 100644
--- a/manager/integration/tests/test_engine_upgrade.py
+++ b/manager/integration/tests/test_engine_upgrade.py
@@ -43,7 +43,7 @@ def test_engine_image(client, core_api, volume_name): # NOQA
"""
Test Engine Image deployment
- 1. List Engine Images and validate basic properities.
+ 1. List Engine Images and validate basic properties.
2. Try deleting default engine image and it should fail.
3. Try creating a duplicate engine image as default and it should fail
4. Get upgrade test image for the same versions
diff --git a/manager/integration/tests/test_ha.py b/manager/integration/tests/test_ha.py
index 797d7ee484..c5ee886185 100644
--- a/manager/integration/tests/test_ha.py
+++ b/manager/integration/tests/test_ha.py
@@ -1033,7 +1033,7 @@ def test_inc_restoration_with_multiple_rebuild_and_expansion(set_random_backupst
wait_for_volume_healthy(client, std_volume_name)
# Step 9:
- # When the total writen data size is more than 1Gi, there must be data in
+ # When the total written data size is more than 1Gi, there must be data in
# the expanded part.
data_path2 = "/data/test2"
write_pod_volume_random_data(core_api, std_pod_name,
@@ -1093,7 +1093,7 @@ def test_inc_restoration_with_multiple_rebuild_and_expansion(set_random_backupst
wait_for_volume_expansion(client, std_volume_name)
# Step 15:
- # When the total writen data size is more than 2Gi, there must be data in
+ # When the total written data size is more than 2Gi, there must be data in
# the 2nd expanded part.
data_path3 = "/data/test3"
write_pod_volume_random_data(core_api, std_pod_name,
@@ -1689,7 +1689,7 @@ def test_engine_crash_for_restore_volume(set_random_backupstore, client, core_ap
# The complete state transition would be like:
# detaching -> detached -> attaching -> attached -> restore -> detached .
# Now the state change too fast, script eventually caught final detach
- # So temporaly comment out below line of code
+ # So temporarily comment out below line of code
# wait_for_volume_detached(client, res_name)
res_volume = wait_for_volume_healthy_no_frontend(client, res_name)
@@ -1806,7 +1806,7 @@ def test_engine_crash_for_dr_volume(set_random_backupstore, client, core_api, vo
# The complete state transition would be like:
# detaching -> detached -> attaching -> attached -> restore -> detached .
# Now the state change too fast, script eventually caught final detach
- # So temporaly comment out below line of code
+ # So temporarily comment out below line of code
# wait_for_volume_detached(client, dr_volume_name)
# Check if the DR volume is auto reattached then continue
@@ -1943,10 +1943,10 @@ def test_extra_replica_cleanup(client, volume_name, settings_reset): # NOQA
save the checksum.
4. Increase the volume replica number to 4.
5. Volume should show failed to schedule and an extra stop replica.
- 6. Decrease the volume replica nubmer to 3.
+ 6. Decrease the volume replica number to 3.
7. Volume should show healthy and the extra failed to scheduled replica
should be removed.
- 8. Check the data in the volume and make sure it's same as the chechsum.
+    8. Check the data in the volume and make sure it's the same as the checksum.
"""
replica_node_soft_anti_affinity_setting = \
client.by_id_setting(SETTING_REPLICA_NODE_SOFT_ANTI_AFFINITY)
diff --git a/manager/integration/tests/test_infra.py b/manager/integration/tests/test_infra.py
index 6842db3090..73f0995ca4 100644
--- a/manager/integration/tests/test_infra.py
+++ b/manager/integration/tests/test_infra.py
@@ -184,7 +184,7 @@ def test_offline_node(reset_cluster_ready_status):
"""
Test offline node
- 1. Bring down one of the nodes in Kuberntes cluster (avoid current node)
+ 1. Bring down one of the nodes in Kubernetes cluster (avoid current node)
2. Make sure the Longhorn node state become `down`
"""
pod_lable_selector = "longhorn-test=test-job"
diff --git a/manager/integration/tests/test_metric.py b/manager/integration/tests/test_metric.py
index 3210cf1f00..5223c70abe 100644
--- a/manager/integration/tests/test_metric.py
+++ b/manager/integration/tests/test_metric.py
@@ -82,7 +82,7 @@ def find_metrics(metric_data, metric_name):
def check_metric_with_condition(core_api, metric_name, metric_labels, expected_value=None, metric_node_id=get_self_host_id()): # NOQA)
"""
- Some metric have multiple conditions, for exameple metric
+    Some metrics have multiple conditions, for example the metric
longhorn_node_status have condition
- allowScheduling
- mountpropagation
diff --git a/manager/integration/tests/test_node.py b/manager/integration/tests/test_node.py
index 1e4ad5dd32..4f95978d4d 100644
--- a/manager/integration/tests/test_node.py
+++ b/manager/integration/tests/test_node.py
@@ -190,7 +190,7 @@ def test_node_disk_update(client): # NOQA
3. Create two disks `disk1` and `disk2`, attach them to the current node.
4. Add two disks to the current node.
5. Verify two extra disks have been added to the node
- 6. Disbale the two disks' scheduling, and set StorageReserved
+ 6. Disable the two disks' scheduling, and set StorageReserved
7. Update the two disks.
8. Validate all the disks properties.
9. Delete other two disks. Validate deletion works.
@@ -1919,7 +1919,7 @@ def test_node_config_annotation_missing(client, core_api, reset_default_disk_lab
3. Verify disk update works.
4. Verify tag update works
5. Verify using tag annotation for configuration works.
- 6. After remove the tag annotaion, verify unset tag node works fine.
+    6. After removing the tag annotation, verify that unsetting the node tag works fine.
7. Set tag annotation again. Verify node updated for the tag.
"""
setting = client.by_id_setting(SETTING_CREATE_DEFAULT_DISK_LABELED_NODES)
@@ -2012,7 +2012,7 @@ def test_replica_scheduler_rebuild_restore_is_too_big(set_random_backupstore, cl
data cannot fit in the small disk
6. Delete a replica of volume.
1. Verify the volume reports `scheduled = false` due to unable to find
- a suitable disk for rebuliding replica, since the replica with the
+ a suitable disk for rebuilding replica, since the replica with the
existing data cannot fit in the small disk
6. Enable the scheduling for other disks, disable scheduling for small disk
7. Verify the volume reports `scheduled = true`. And verify the data.
diff --git a/manager/integration/tests/test_rwx.py b/manager/integration/tests/test_rwx.py
index 79ea321117..2132acf020 100644
--- a/manager/integration/tests/test_rwx.py
+++ b/manager/integration/tests/test_rwx.py
@@ -538,7 +538,7 @@ def test_rwx_online_expansion(): # NOQA
- Create a rwx pvc using longhorn storage class of size 1 Gi.
And
- - Atach it to a workload (deployment) and write some data.
+ - Attach it to a workload (deployment) and write some data.
When
- Expand the volume to 5 Gi
@@ -566,7 +566,7 @@ def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc):
- Create a rwx pvc using longhorn storage class of size 1 Gi.
And
- - Atach it to a workload (deployment) and write some data.
+ - Attach it to a workload (deployment) and write some data.
- Scale down the workload, wait volume detached
- Share manager pod will terminate automatically
- Expand the volume to 4 Gi, wait exoansion complete
diff --git a/manager/integration/tests/test_scheduling.py b/manager/integration/tests/test_scheduling.py
index 2c164cad38..e6ffacd7c5 100644
--- a/manager/integration/tests/test_scheduling.py
+++ b/manager/integration/tests/test_scheduling.py
@@ -1917,7 +1917,7 @@ def test_global_disk_soft_anti_affinity(client, volume_name, request): # NOQA
assert num_running == 2
# After enable SETTING_REPLICA_DISK_SOFT_ANTI_AFFINITY to true,
- # replicas can schedule on the same disk, threrefore volume become healthy
+    # replicas can schedule on the same disk, therefore the volume becomes healthy
update_setting(client, SETTING_REPLICA_DISK_SOFT_ANTI_AFFINITY, "true")
volume = wait_for_volume_healthy(client, volume_name)
@@ -2088,7 +2088,7 @@ def test_volume_disk_soft_anti_affinity(client, volume_name, request): # NOQA
assert num_running == 2
# After set update volume.updateReplicaDiskSoftAntiAffinity to enabled,
- # replicas can schedule on the same disk, threrefore volume become healthy
+    # replicas can schedule on the same disk, therefore the volume becomes healthy
volume = volume.updateReplicaDiskSoftAntiAffinity(
replicaDiskSoftAntiAffinity="enabled")
assert volume.replicaDiskSoftAntiAffinity == "enabled"
diff --git a/manager/integration/tests/test_settings.py b/manager/integration/tests/test_settings.py
index 1f025b2fae..aff12a2732 100644
--- a/manager/integration/tests/test_settings.py
+++ b/manager/integration/tests/test_settings.py
@@ -995,7 +995,7 @@ def setting_concurrent_volume_backup_restore_limit_concurrent_restoring_test(cli
break
assert is_case_tested, \
- f"Unexpected cocurrent count: {concurrent_count}\n"
+ f"Unexpected concurrent count: {concurrent_count}\n"
for restore_volume_name in restore_volume_names:
if is_DR_volumes:
@@ -1197,7 +1197,7 @@ def test_setting_update_with_invalid_value_via_configmap(core_api, request): #
2. Initialize longhorn-default-setting configmap containing
valid and invalid settings
3. Update longhorn-default-setting configmap with invalid settings.
- The invalid settings SETTING_TAINT_TOLERATION will be ingored
+       The invalid setting SETTING_TAINT_TOLERATION will be ignored
when there is an attached volume.
4. Validate the default settings values.
"""
diff --git a/manager/integration/tests/test_statefulset.py b/manager/integration/tests/test_statefulset.py
index a4a216dbd7..428119cfae 100644
--- a/manager/integration/tests/test_statefulset.py
+++ b/manager/integration/tests/test_statefulset.py
@@ -100,7 +100,7 @@ def test_statefulset_mount(client, core_api, storage_class, statefulset): # NOQ
1. Create a StatefulSet using dynamic provisioned Longhorn volume.
2. Wait for pods to become running
- 3. Check volume properites are consistent with the StorageClass
+ 3. Check volume properties are consistent with the StorageClass
"""
statefulset_name = 'statefulset-mount-test'
@@ -138,7 +138,7 @@ def test_statefulset_scaling(client, core_api, storage_class, statefulset): # N
1. Create a StatefulSet with VolumeClaimTemplate and Longhorn.
2. Wait for pods to run.
- 3. Verify the properities of volumes.
+ 3. Verify the properties of volumes.
4. Scale the StatefulSet to 3 replicas
5. Wait for the new pod to become ready.
6. Verify the new volume properties.
@@ -259,7 +259,7 @@ def test_statefulset_backup(set_random_backupstore, client, core_api, storage_cl
4. Create a third snapshot
5. Backup the snapshot `backup_snapshot`
6. Wait for backup to show up.
- 1 Verify the backup informations
+ 1 Verify the backup information
"""
statefulset_name = 'statefulset-backup-test'
diff --git a/scalability_test/script/monitor.py b/scalability_test/script/monitor.py
index de03533460..cc195d7b2a 100644
--- a/scalability_test/script/monitor.py
+++ b/scalability_test/script/monitor.py
@@ -50,7 +50,7 @@ def update_data(self):
node_list = []
try:
pod_list = self.core_api_v1.list_namespaced_pod("default")
- # TODO: change to catch any exeption and count the number of api exceptions
+ # TODO: change to catch any exception and count the number of api exceptions
except client.ApiException as e:
print("Exception when calling CoreV1Api->list_namespaced_pod: %s\n" % e)
print("Skipping this update")
@@ -58,7 +58,7 @@ def update_data(self):
try:
node_list = self.custom_objects_api.list_cluster_custom_object("metrics.k8s.io", "v1beta1", "nodes")
- # TODO: change to catch any exeption and count the number of api exceptions
+ # TODO: change to catch any exception and count the number of api exceptions
except client.ApiException as e:
print("Exception when calling custom_objects_api->list_cluster_custom_object: %s\n" % e)
print("Will set node metrics to 0")
@@ -76,12 +76,12 @@ def update_data(self):
if pod_with_valid_starting_time_count < running_pod_count and MAX_POD_STARTING_TIME_POINT not in self.annotating_points:
self.annotating_points[MAX_POD_STARTING_TIME_POINT] = {
"xy": (diff.total_seconds(),
- pod_with_valid_starting_time_count), "desciption": "(1) "+str(pod_with_valid_starting_time_count)+" pods",
+ pod_with_valid_starting_time_count), "description": "(1) "+str(pod_with_valid_starting_time_count)+" pods",
"color": "tab:orange"}
if crashing_pod_count > self.max_pod_crashing_count and MAX_POD_CRASHING_POINT not in self.annotating_points:
self.annotating_points[MAX_POD_CRASHING_POINT] = {
"xy": (diff.total_seconds(),
- pod_with_valid_starting_time_count), "desciption": "(2) "+str(pod_with_valid_starting_time_count)+" pods",
+ pod_with_valid_starting_time_count), "description": "(2) "+str(pod_with_valid_starting_time_count)+" pods",
"color": "tab:red"}
for node in node_list['items']:
@@ -101,7 +101,7 @@ def update_data(self):
self.cpu_metrics[node_name] = cpu_metric
self.ram_metrics[node_name] = ram_metric
- # update node metrics with value 0 if the infomation is missing in the above update
+ # update node metrics with value 0 if the information is missing in the above update
for metric in self.cpu_metrics.values():
if len(metric) < len(self.time_diffs):
cpu_metric.extend([0]*(len(self.time_diffs)-len(metric)))
@@ -192,10 +192,10 @@ def draw(self):
ax1, ax2, ax3 = self.axes
ax1.plot(self.time_diffs, self.running_pod_metric)
- ax1.set_ylabel('Number of running pods')
+    ax1.set_ylabel('Number of running pods')
for point in self.annotating_points.values():
- ax1.annotate(point["desciption"],
+ ax1.annotate(point["description"],
xy= point["xy"], xycoords='data',
xytext=(0, 20), textcoords='offset points',
arrowprops=dict(facecolor=point["color"], shrink=0.05),
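
The TODO comments fixed above note that update_data() only catches client.ApiException before skipping the update. A minimal sketch of the broader behavior the TODO asks for, assuming a standalone helper and a caller-supplied failure counter (both hypothetical, not part of monitor.py):

from kubernetes import client, config

def list_default_pods(core_api_v1, failure_counter):
    # Hypothetical helper: catch any exception (not only client.ApiException)
    # and count API failures, as suggested by the TODO in update_data().
    try:
        return core_api_v1.list_namespaced_pod("default")
    except Exception as e:
        failure_counter["api_exceptions"] = failure_counter.get("api_exceptions", 0) + 1
        print("Exception when calling CoreV1Api->list_namespaced_pod: %s" % e)
        return None

# Usage sketch (requires a reachable cluster):
# config.load_kube_config()
# pods = list_default_pods(client.CoreV1Api(), failure_counter={})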
diff --git a/scalability_test/script/scale-test.py b/scalability_test/script/scale-test.py
index d3164f7442..e343617ffd 100644
--- a/scalability_test/script/scale-test.py
+++ b/scalability_test/script/scale-test.py
@@ -33,7 +33,7 @@ def get_node_capacities():
# hugepages-2Mi: '0'
# memory: 32412804Ki
# pods: '110'
- cpu = int(i.status.capacity["cpu"])*1000**3 # conver to nano cpu
+ cpu = int(i.status.capacity["cpu"])*1000**3 # convert to nano cpu
ram = int(i.status.capacity["memory"][:-2])
node_capacities[i.metadata.name] = {"cpu": cpu, "ram": ram}
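
For reference, the conversion the corrected comment describes treats one CPU core as 1000**3 nano-CPU and strips the trailing "Ki" from the reported memory capacity. A small self-contained sketch of the same arithmetic, assuming capacity strings shaped like the commented example (the cpu value "8" here is illustrative):

def to_node_capacity(cpu_str, memory_str):
    # Mirrors get_node_capacities(): cores -> nano-CPU, "<n>Ki" -> integer KiB.
    cpu = int(cpu_str) * 1000**3      # "8" -> 8_000_000_000 nano-CPU
    ram = int(memory_str[:-2])        # "32412804Ki" -> 32412804
    return {"cpu": cpu, "ram": ram}

assert to_node_capacity("8", "32412804Ki") == {"cpu": 8 * 1000**3, "ram": 32412804}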
diff --git a/secscan/terraform/aws/main.tf b/secscan/terraform/aws/main.tf
index 81fed9e5a1..52bd9be406 100644
--- a/secscan/terraform/aws/main.tf
+++ b/secscan/terraform/aws/main.tf
@@ -90,7 +90,7 @@ resource "aws_route_table" "lh-secscan_aws_public_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh-secscan_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh-secscan_aws_public_subnet,
diff --git a/test_framework/terraform/aws/centos/main.tf b/test_framework/terraform/aws/centos/main.tf
index 758a625be2..7097a5a1fd 100644
--- a/test_framework/terraform/aws/centos/main.tf
+++ b/test_framework/terraform/aws/centos/main.tf
@@ -250,7 +250,7 @@ resource "aws_route_table" "lh_aws_private_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_public_subnet,
@@ -261,7 +261,7 @@ resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
route_table_id = aws_route_table.lh_aws_public_rt.id
}
-# Assciate private subnet to private route table
+# Associate private subnet to private route table
resource "aws_route_table_association" "lh_aws_private_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_private_subnet,
diff --git a/test_framework/terraform/aws/oracle/main.tf b/test_framework/terraform/aws/oracle/main.tf
index 4b22f7a21f..3fddf19914 100644
--- a/test_framework/terraform/aws/oracle/main.tf
+++ b/test_framework/terraform/aws/oracle/main.tf
@@ -250,7 +250,7 @@ resource "aws_route_table" "lh_aws_private_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_public_subnet,
@@ -261,7 +261,7 @@ resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
route_table_id = aws_route_table.lh_aws_public_rt.id
}
-# Assciate private subnet to private route table
+# Associate private subnet to private route table
resource "aws_route_table_association" "lh_aws_private_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_private_subnet,
diff --git a/test_framework/terraform/aws/rhel/main.tf b/test_framework/terraform/aws/rhel/main.tf
index 4b22f7a21f..3fddf19914 100644
--- a/test_framework/terraform/aws/rhel/main.tf
+++ b/test_framework/terraform/aws/rhel/main.tf
@@ -250,7 +250,7 @@ resource "aws_route_table" "lh_aws_private_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_public_subnet,
@@ -261,7 +261,7 @@ resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
route_table_id = aws_route_table.lh_aws_public_rt.id
}
-# Assciate private subnet to private route table
+# Associate private subnet to private route table
resource "aws_route_table_association" "lh_aws_private_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_private_subnet,
diff --git a/test_framework/terraform/aws/rockylinux/main.tf b/test_framework/terraform/aws/rockylinux/main.tf
index 02cf5120a5..e8e7be4756 100644
--- a/test_framework/terraform/aws/rockylinux/main.tf
+++ b/test_framework/terraform/aws/rockylinux/main.tf
@@ -251,7 +251,7 @@ resource "aws_route_table" "lh_aws_private_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_public_subnet,
@@ -262,7 +262,7 @@ resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
route_table_id = aws_route_table.lh_aws_public_rt.id
}
-# Assciate private subnet to private route table
+# Associate private subnet to private route table
resource "aws_route_table_association" "lh_aws_private_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_private_subnet,
diff --git a/test_framework/terraform/aws/sles/main.tf b/test_framework/terraform/aws/sles/main.tf
index 665dd5b946..0e78f0a6c6 100644
--- a/test_framework/terraform/aws/sles/main.tf
+++ b/test_framework/terraform/aws/sles/main.tf
@@ -258,7 +258,7 @@ resource "aws_route_table" "lh_aws_private_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_public_subnet,
@@ -269,7 +269,7 @@ resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
route_table_id = aws_route_table.lh_aws_public_rt.id
}
-# Assciate private subnet to private route table
+# Associate private subnet to private route table
resource "aws_route_table_association" "lh_aws_private_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_private_subnet,
diff --git a/test_framework/terraform/aws/ubuntu/main.tf b/test_framework/terraform/aws/ubuntu/main.tf
index 5ce977a111..956411de0f 100644
--- a/test_framework/terraform/aws/ubuntu/main.tf
+++ b/test_framework/terraform/aws/ubuntu/main.tf
@@ -252,7 +252,7 @@ resource "aws_route_table" "lh_aws_private_rt" {
}
}
-# Assciate public subnet to public route table
+# Associate public subnet to public route table
resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_public_subnet,
@@ -263,7 +263,7 @@ resource "aws_route_table_association" "lh_aws_public_subnet_rt_association" {
route_table_id = aws_route_table.lh_aws_public_rt.id
}
-# Assciate private subnet to private route table
+# Associate private subnet to private route table
resource "aws_route_table_association" "lh_aws_private_subnet_rt_association" {
depends_on = [
aws_subnet.lh_aws_private_subnet,
diff --git a/test_tools/gen_data/README.md b/test_tools/gen_data/README.md
index 3e73e6720a..f46e6661c3 100644
--- a/test_tools/gen_data/README.md
+++ b/test_tools/gen_data/README.md
@@ -7,7 +7,7 @@ Modify config.yaml
storage: 1Gi # Each volume size
storageClass: longhorn-test # Need to prepare your own storage class first
dataSizeInMb: 500
-namespace: default # Nees to prepare first before run script
+namespace: default # Needs to be prepared before running the script
statefulSet: # Single RWO/RWX statefulset and its replica counts
rwo:
replicas: 1
diff --git a/test_tools/gen_data/run.sh b/test_tools/gen_data/run.sh
index d9b786786a..aa5da23629 100755
--- a/test_tools/gen_data/run.sh
+++ b/test_tools/gen_data/run.sh
@@ -120,7 +120,7 @@ check_config_input() {
DEPLOYMENT_RWX_REPLICAS=$(yq eval '.deployment.rwx.deploymentReplicas' config.yaml)
msg="$CONFIG_FILE is not correct, please check"
- # varialbe = "null" when yq not find yaml field
+    # variable = "null" when yq does not find the yaml field
[ "$STORAGE_SIZE" = "null" -o ${#STORAGE_SIZE} -eq 0 ] && error "$msg" && exit 2
[ "$NAMESPACE" = "null" -o ${#NAMESPACE} -eq 0 ] && error "$msg" && exit 2
[ "$STORAGE_CLASS_NAME" = "null" -o ${#STORAGE_CLASS_NAME} -eq 0 ] && error "$msg" && exit 2