diff --git a/e2e/README.md b/e2e/README.md index e2933e0924..898ace2ba2 100644 --- a/e2e/README.md +++ b/e2e/README.md @@ -14,10 +14,14 @@ curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/master/scripts/en ### Run the test -1. Deploy all backupstore servers (including `NFS` server and `Minio` as s3 server) for test purposes. +1. Deploy all backupstore servers (including `NFS` server, `Minio` as s3 server, `CIFS` server, and `Azurite` server) for test purposes. + + For Azurite, there are some manual steps that need to be done after the manifest is deployed (https://github.com/longhorn/longhorn-tests/wiki/Setup-Azurite-Backupstore-For-Testing). ``` -kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml \ - -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml +kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/minio-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/nfs-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml ``` 1. Expose Longhorn API: diff --git a/manager/integration/README.md b/manager/integration/README.md index 77fcef7cf5..3cdd7c3677 100644 --- a/manager/integration/README.md +++ b/manager/integration/README.md @@ -16,10 +16,14 @@ Requirement: 6. Make sure `nfs-common` or equivalent has been installed on the node to allow the NFS client to work. Run the test: -1. Deploy all backupstore servers(including `NFS` server and `Minio` as s3 server) for test purposes. +1. Deploy all backupstore servers (including `NFS` server, `Minio` as s3 server, `CIFS` server, and `Azurite` server) for test purposes. + + For Azurite, there are some manual steps that need to be done after the manifest is deployed (https://github.com/longhorn/longhorn-tests/wiki/Setup-Azurite-Backupstore-For-Testing). ``` -kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml \ - -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml +kubectl create -f https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/minio-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/nfs-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml \ + -f https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml ``` 2. Deploy the test script to the Kubernetes cluster.
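The "manual steps" referenced above for Azurite boil down to re-pointing `azblob-secret` at the in-cluster Azurite endpoint and creating the test container. Below is a minimal sketch of that procedure, mirroring the `setup_azuitize_backup_store` helper added to `pipelines/utilities/install_backupstores.sh` in this change; the `azblob-service`/`azblob-secret` names and the default `devstoreaccount1` key come from the Azurite manifest, and the wiki page linked above remains the authoritative procedure.
```
# Sketch only - mirrors setup_azuitize_backup_store from install_backupstores.sh in this change.
# Wait for the Azurite pod to be running.
until kubectl get pods | grep 'longhorn-test-azblob' | grep -q 'Running'; do
  sleep 5
done

# Re-point the azblob-secret at the in-cluster Azurite endpoint.
AZBLOB_ENDPOINT=$(echo -n "http://$(kubectl get svc azblob-service -o jsonpath='{.spec.clusterIP}'):10000/" | base64)
kubectl -n longhorn-system patch secret azblob-secret \
  --type=json \
  -p="[{'op': 'replace', 'path': '/data/AZBLOB_ENDPOINT', 'value': \"${AZBLOB_ENDPOINT}\"}]"

# Create the container used by the tests, via a local port-forward and Azurite's
# well-known devstoreaccount1 account key.
kubectl port-forward service/azblob-service 20001:10000 &
sleep 5
az storage container create -n longhorn-test-azurite \
  --connection-string 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:20001/devstoreaccount1;'
```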
``` diff --git a/manager/integration/deploy/backupstores/minio-backupstore.yaml b/manager/integration/deploy/backupstores/minio-backupstore.yaml new file mode 100644 index 0000000000..398bc6a765 --- /dev/null +++ b/manager/integration/deploy/backupstores/minio-backupstore.yaml @@ -0,0 +1,112 @@ +apiVersion: v1 +kind: Secret +metadata: + name: minio-secret + namespace: default +type: Opaque +data: + AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key + AWS_SECRET_ACCESS_KEY: bG9uZ2hvcm4tdGVzdC1zZWNyZXQta2V5 # longhorn-test-secret-key + AWS_ENDPOINTS: aHR0cHM6Ly9taW5pby1zZXJ2aWNlLmRlZmF1bHQ6OTAwMA== # https://minio-service.default:9000 + AWS_CERT: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQU1kbzQycGhUZXlrMTcvYkxyWjVZRHN3RFFZSktvWklodmNOQVFFTEJRQXcKR2pFWU1CWUdBMVVFQ2hNUFRHOXVaMmh2Y200Z0xTQlVaWE4wTUNBWERUSXdNRFF5TnpJek1EQXhNVm9ZRHpJeApNakF3TkRBek1qTXdNREV4V2pBYU1SZ3dGZ1lEVlFRS0V3OU1iMjVuYUc5eWJpQXRJRlJsYzNRd2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEWHpVdXJnUFpEZ3pUM0RZdWFlYmdld3Fvd2RlQUQKODRWWWF6ZlN1USs3K21Oa2lpUVBvelVVMmZvUWFGL1BxekJiUW1lZ29hT3l5NVhqM1VFeG1GcmV0eDBaRjVOVgpKTi85ZWFJNWRXRk9teHhpMElPUGI2T0RpbE1qcXVEbUVPSXljdjRTaCsvSWo5Zk1nS0tXUDdJZGxDNUJPeThkCncwOVdkckxxaE9WY3BKamNxYjN6K3hISHd5Q05YeGhoRm9tb2xQVnpJbnlUUEJTZkRuSDBuS0lHUXl2bGhCMGsKVHBHSzYxc2prZnFTK3hpNTlJeHVrbHZIRXNQcjFXblRzYU9oaVh6N3lQSlorcTNBMWZoVzBVa1JaRFlnWnNFbQovZ05KM3JwOFhZdURna2kzZ0UrOElXQWRBWHExeWhqRDdSSkI4VFNJYTV0SGpKUUtqZ0NlSG5HekFnTUJBQUdqCmF6QnBNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUIKQWY4RUJUQURBUUgvTURFR0ExVWRFUVFxTUNpQ0NXeHZZMkZzYUc5emRJSVZiV2x1YVc4dGMyVnlkbWxqWlM1awpaV1poZFd4MGh3Ui9BQUFCTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDbUZMMzlNSHVZMzFhMTFEajRwMjVjCnFQRUM0RHZJUWozTk9kU0dWMmQrZjZzZ3pGejFXTDhWcnF2QjFCMVM2cjRKYjJQRXVJQkQ4NFlwVXJIT1JNU2MKd3ViTEppSEtEa0Jmb2U5QWI1cC9VakpyS0tuajM0RGx2c1cvR3AwWTZYc1BWaVdpVWorb1JLbUdWSTI0Q0JIdgpnK0JtVzNDeU5RR1RLajk0eE02czNBV2xHRW95YXFXUGU1eHllVWUzZjFBWkY5N3RDaklKUmVWbENtaENGK0JtCmFUY1RSUWN3cVdvQ3AwYmJZcHlERFlwUmxxOEdQbElFOW8yWjZBc05mTHJVcGFtZ3FYMmtYa2gxa3lzSlEralAKelFadHJSMG1tdHVyM0RuRW0yYmk0TktIQVFIcFc5TXUxNkdRakUxTmJYcVF0VEI4OGpLNzZjdEg5MzRDYWw2VgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + AWS_CERT_KEY: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRFh6VXVyZ1BaRGd6VDMKRFl1YWViZ2V3cW93ZGVBRDg0VllhemZTdVErNyttTmtpaVFQb3pVVTJmb1FhRi9QcXpCYlFtZWdvYU95eTVYagozVUV4bUZyZXR4MFpGNU5WSk4vOWVhSTVkV0ZPbXh4aTBJT1BiNk9EaWxNanF1RG1FT0l5Y3Y0U2grL0lqOWZNCmdLS1dQN0lkbEM1Qk95OGR3MDlXZHJMcWhPVmNwSmpjcWIzeit4SEh3eUNOWHhoaEZvbW9sUFZ6SW55VFBCU2YKRG5IMG5LSUdReXZsaEIwa1RwR0s2MXNqa2ZxUyt4aTU5SXh1a2x2SEVzUHIxV25Uc2FPaGlYejd5UEpaK3EzQQoxZmhXMFVrUlpEWWdac0VtL2dOSjNycDhYWXVEZ2tpM2dFKzhJV0FkQVhxMXloakQ3UkpCOFRTSWE1dEhqSlFLCmpnQ2VIbkd6QWdNQkFBRUNnZ0VBZlVyQ1hrYTN0Q2JmZjNpcnp2cFFmZnVEbURNMzV0TmlYaDJTQVpSVW9FMFYKbSsvZ1UvdnIrN2s2eUgvdzhMOXhpZXFhQTljVkZkL0JuTlIrMzI2WGc2dEpCNko2ZGZxODJZdmZOZ0VDaUFMaQpqalNGemFlQmhnT3ZsWXZHbTR5OTU1Q0FGdjQ1cDNac1VsMTFDRXJlL1BGbGtaWHRHeGlrWFl6NC85UTgzblhZCnM2eDdPYTgyUjdwT2lraWh3Q0FvVTU3Rjc4ZWFKOG1xTmkwRlF2bHlxSk9QMTFCbVp4dm54ZU11S2poQjlPTnAKTFNwMWpzZXk5bDZNR2pVbjBGTG53RHZkVWRiK0ZlUEkxTjdWYUNBd3hJK3JHa3JTWkhnekhWWE92VUpON2t2QQpqNUZPNW9uNGgvK3hXbkYzM3lxZ0VvWWZ0MFFJL2pXS2NOV1d1a2pCd1FLQmdRRGVFNlJGRUpsT2Q1aVcxeW1qCm45RENnczVFbXFtRXN3WU95bkN3U2RhK1lNNnZVYmlac1k4WW9wMVRmVWN4cUh2NkFQWGpVd2NBUG1QVE9KRW8KMlJtS0xTYkhsTnc4bFNOMWJsWDBEL3Mzamc1R3VlVW9nbW5TVnhMa0h1OFhKR0o3VzFReEUzZG9IUHRrcTNpagpoa09QTnJpZFM0UmxqNTJwYkhscjUvQzRjUUtCZ1FENHhFYmpuck1heFV2b0xxVTRvT2xiOVc5UytSUllTc0cxCmxJUmgzNzZTV0ZuTTlSdGoyMTI0M1hkaE4zUFBtSTNNeiswYjdyMnZSUi9LMS9Cc1JUQnlrTi9kbkVuNVUxQkEKYm90cGZIS1Jvc1FUR1hIQkEvM0JrNC9qOWplU3RmVXgzZ2x3eUI0L2hORy9KM1ZVV2FXeURTRm5qZFEvcGJsRwp6VWlsSVBmK1l3S0JnUUNwMkdYYmVJMTN5TnBJQ3psS2JqRlFncEJWUWVDQ29CVHkvUHRncUtoM3BEeVBNN1kyCnZla09VMWgyQVN1UkhDWHRtQXgzRndvVXNxTFFhY1FEZEw4bXdjK1Y5eERWdU02TXdwMDBjNENVQmE1L2d5OXoKWXdLaUgzeFFRaVJrRTZ6S1laZ3JqSkxYYXNzT1BHS2cxbEFYV1NlckRaV3R3MEEyMHNLdXQ0NlEwUUtCZ0hGZQpxZHZVR0ZXcjhvTDJ0dzlPcmVyZHVJVTh4RnZVZmVFdHRRTVJ2N3pjRE5qT0gxUnJ4Wk9aUW0ySW92dkp6MTIyCnFKMWhPUXJtV3EzTHFXTCtTU3o4L3pqMG4vWERWVUIzNElzTFR2ODJDVnVXN2ZPRHlTSnVDRlpnZ0VVWkxZd3oKWDJRSm4xZGRSV1Z6S3hKczVJbDNXSERqL3dXZWxnaEJSOGtSZEZOM0FvR0FJNldDdjJQQ1lUS1ZZNjAwOFYwbgpyTDQ3YTlPanZ0Yy81S2ZxSjFpMkpKTUgyQi9jbU1WRSs4M2dpODFIU1FqMWErNnBjektmQVppZWcwRk9nL015ClB6VlZRYmpKTnY0QzM5KzdxSDg1WGdZTXZhcTJ0aDFEZWUvQ3NsMlM4QlV0cW5mc0VuMUYwcWhlWUJZb2RibHAKV3RUaE5oRi9oRVhzbkJROURyWkJKT1U9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K +--- +# same secret for longhorn-system namespace +apiVersion: v1 +kind: Secret +metadata: + name: minio-secret + namespace: longhorn-system +type: Opaque +data: + AWS_ACCESS_KEY_ID: bG9uZ2hvcm4tdGVzdC1hY2Nlc3Mta2V5 # longhorn-test-access-key + AWS_SECRET_ACCESS_KEY: bG9uZ2hvcm4tdGVzdC1zZWNyZXQta2V5 # longhorn-test-secret-key + AWS_ENDPOINTS: aHR0cHM6Ly9taW5pby1zZXJ2aWNlLmRlZmF1bHQ6OTAwMA== # https://minio-service.default:9000 + AWS_CERT: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMRENDQWhTZ0F3SUJBZ0lSQU1kbzQycGhUZXlrMTcvYkxyWjVZRHN3RFFZSktvWklodmNOQVFFTEJRQXcKR2pFWU1CWUdBMVVFQ2hNUFRHOXVaMmh2Y200Z0xTQlVaWE4wTUNBWERUSXdNRFF5TnpJek1EQXhNVm9ZRHpJeApNakF3TkRBek1qTXdNREV4V2pBYU1SZ3dGZ1lEVlFRS0V3OU1iMjVuYUc5eWJpQXRJRlJsYzNRd2dnRWlNQTBHCkNTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEWHpVdXJnUFpEZ3pUM0RZdWFlYmdld3Fvd2RlQUQKODRWWWF6ZlN1USs3K21Oa2lpUVBvelVVMmZvUWFGL1BxekJiUW1lZ29hT3l5NVhqM1VFeG1GcmV0eDBaRjVOVgpKTi85ZWFJNWRXRk9teHhpMElPUGI2T0RpbE1qcXVEbUVPSXljdjRTaCsvSWo5Zk1nS0tXUDdJZGxDNUJPeThkCncwOVdkckxxaE9WY3BKamNxYjN6K3hISHd5Q05YeGhoRm9tb2xQVnpJbnlUUEJTZkRuSDBuS0lHUXl2bGhCMGsKVHBHSzYxc2prZnFTK3hpNTlJeHVrbHZIRXNQcjFXblRzYU9oaVh6N3lQSlorcTNBMWZoVzBVa1JaRFlnWnNFbQovZ05KM3JwOFhZdURna2kzZ0UrOElXQWRBWHExeWhqRDdSSkI4VFNJYTV0SGpKUUtqZ0NlSG5HekFnTUJBQUdqCmF6QnBNQTRHQTFVZER3RUIvd1FFQXdJQ3BEQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBVEFQQmdOVkhSTUIKQWY4RUJUQURBUUgvTURFR0ExVWRFUVFxTUNpQ0NXeHZZMkZzYUc5emRJSVZiV2x1YVc4dGMyVnlkbWxqWlM1awpaV1poZFd4MGh3Ui9BQUFCTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDbUZMMzlNSHVZMzFhMTFEajRwMjVjCnFQRUM0RHZJUWozTk9kU0dWMmQrZjZzZ3pGejFXTDhWcnF2QjFCMVM2cjRKYjJQRXVJQkQ4NFlwVXJIT1JNU2MKd3ViTEppSEtEa0Jmb2U5QWI1cC9VakpyS0tuajM0RGx2c1cvR3AwWTZYc1BWaVdpVWorb1JLbUdWSTI0Q0JIdgpnK0JtVzNDeU5RR1RLajk0eE02czNBV2xHRW95YXFXUGU1eHllVWUzZjFBWkY5N3RDaklKUmVWbENtaENGK0JtCmFUY1RSUWN3cVdvQ3AwYmJZcHlERFlwUmxxOEdQbElFOW8yWjZBc05mTHJVcGFtZ3FYMmtYa2gxa3lzSlEralAKelFadHJSMG1tdHVyM0RuRW0yYmk0TktIQVFIcFc5TXUxNkdRakUxTmJYcVF0VEI4OGpLNzZjdEg5MzRDYWw2VgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-test-minio + namespace: default + labels: + app: longhorn-test-minio +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-test-minio + template: + metadata: + labels: + app: longhorn-test-minio + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoExecute" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoExecute" + - key: "node-role.kubernetes.io/etcd" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/etcd" + operator: "Exists" + effect: "NoExecute" + volumes: + - name: minio-volume + emptyDir: {} + - name: minio-certificates + secret: + secretName: minio-secret + items: + - key: AWS_CERT + path: public.crt + - key: AWS_CERT_KEY + path: private.key + containers: + - name: minio + image: minio/minio:RELEASE.2022-02-01T18-00-14Z + command: ["sh", "-c", "mkdir -p /storage/backupbucket && mkdir -p /root/.minio/certs && ln -s /root/certs/private.key /root/.minio/certs/private.key && ln -s /root/certs/public.crt /root/.minio/certs/public.crt && exec minio server /storage"] + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: minio-secret + key: AWS_ACCESS_KEY_ID + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: minio-secret + key: AWS_SECRET_ACCESS_KEY + ports: + - containerPort: 9000 + volumeMounts: + - name: minio-volume + mountPath: "/storage" + - name: minio-certificates + mountPath: "/root/certs" + readOnly: true +--- +apiVersion: v1 +kind: Service +metadata: + name: minio-service + namespace: default +spec: + selector: + app: longhorn-test-minio + ports: + - port: 9000 + targetPort: 9000 + protocol: TCP + 
sessionAffinity: ClientIP diff --git a/manager/integration/deploy/backupstores/nfs-backupstore.yaml b/manager/integration/deploy/backupstores/nfs-backupstore.yaml new file mode 100644 index 0000000000..548cb7b884 --- /dev/null +++ b/manager/integration/deploy/backupstores/nfs-backupstore.yaml @@ -0,0 +1,81 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-test-nfs + namespace: default + labels: + app: longhorn-test-nfs +spec: + selector: + matchLabels: + app: longhorn-test-nfs + template: + metadata: + labels: + app: longhorn-test-nfs + spec: + nodeSelector: + node-role.kubernetes.io/control-plane: "true" + tolerations: + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/master" + operator: "Exists" + effect: "NoExecute" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoExecute" + - key: "node-role.kubernetes.io/etcd" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/etcd" + operator: "Exists" + effect: "NoExecute" + volumes: + - name: nfs-volume + emptyDir: {} + containers: + - name: longhorn-test-nfs-container + image: longhornio/nfs-ganesha:latest + imagePullPolicy: Always + env: + - name: EXPORT_ID + value: "14" + - name: EXPORT_PATH + value: /opt/backupstore + - name: PSEUDO_PATH + value: /opt/backupstore + - name: NFS_DISK_IMAGE_SIZE_MB + value: "4096" + command: ["bash", "-c", "chmod 700 /opt/backupstore && /opt/start_nfs.sh | tee /var/log/ganesha.log"] + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN", "DAC_READ_SEARCH"] + volumeMounts: + - name: nfs-volume + mountPath: "/opt/backupstore" + livenessProbe: + exec: + command: ["bash", "-c", "grep \"No export entries found\" /var/log/ganesha.log > /dev/null 2>&1 ; [ $? 
-ne 0 ]"] + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 4 +--- +kind: Service +apiVersion: v1 +metadata: + name: longhorn-test-nfs-svc + namespace: default +spec: + selector: + app: longhorn-test-nfs + clusterIP: None + ports: + - name: notnecessary + port: 1234 + targetPort: 1234 diff --git a/manager/integration/deploy/test.yaml b/manager/integration/deploy/test.yaml index eb3a44c8f5..df56d679db 100644 --- a/manager/integration/deploy/test.yaml +++ b/manager/integration/deploy/test.yaml @@ -42,7 +42,7 @@ spec: - name: LONGHORN_JUNIT_REPORT_PATH value: /tmp/test-report/longhorn-test-junit-report.xml - name: LONGHORN_BACKUPSTORES - value: "s3://backupbucket@us-east-1/backupstore$minio-secret, nfs://longhorn-test-nfs-svc.default:/opt/backupstore" + value: "s3://backupbucket@us-east-1/backupstore$minio-secret, nfs://longhorn-test-nfs-svc.default:/opt/backupstore, cifs://longhorn-test-cifs-svc.default/backupstore$cifs-secret, azblob://longhorn-test-azurite@core.windows.net/$azblob-secret" - name: LONGHORN_BACKUPSTORE_POLL_INTERVAL value: "30" - name: LONGHORN_DISK_TYPE diff --git a/manager/integration/tests/backupstore.py b/manager/integration/tests/backupstore.py index ac74615c6e..b938636d85 100644 --- a/manager/integration/tests/backupstore.py +++ b/manager/integration/tests/backupstore.py @@ -17,6 +17,8 @@ from common import cleanup_all_volumes from common import is_backupTarget_s3 from common import is_backupTarget_nfs +from common import is_backupTarget_cifs +from common import is_backupTarget_azurite from common import get_longhorn_api_client from common import delete_backup_volume from common import delete_backup_backing_image @@ -64,8 +66,12 @@ def set_random_backupstore(request, client): elif request.param == "nfs": set_backupstore_nfs(client) mount_nfs_backupstore(client) + elif request.param == "cifs": + set_backupstore_cifs(client) + elif request.param == "azblob": + set_backupstore_azurite(client) - yield + yield request.param cleanup_all_volumes(client) backupstore_cleanup(client) system_backups_cleanup(client) @@ -116,6 +122,30 @@ def set_backupstore_nfs(client): break +def set_backupstore_cifs(client): + backupstores = get_backupstore_url() + poll_interval = get_backupstore_poll_interval() + for backupstore in backupstores: + if is_backupTarget_cifs(backupstore): + backupsettings = backupstore.split("$") + set_backupstore_url(client, backupsettings[0]) + set_backupstore_credential_secret(client, backupsettings[1]) + set_backupstore_poll_interval(client, poll_interval) + break + + +def set_backupstore_azurite(client): + backupstores = get_backupstore_url() + poll_interval = get_backupstore_poll_interval() + for backupstore in backupstores: + if is_backupTarget_azurite(backupstore): + backupsettings = backupstore.split("$") + set_backupstore_url(client, backupsettings[0]) + set_backupstore_credential_secret(client, backupsettings[1]) + set_backupstore_poll_interval(client, poll_interval) + break + + def set_backupstore_url(client, url): backup_target_setting = client.by_id_setting(SETTING_BACKUP_TARGET) backup_target_setting = client.update(backup_target_setting, @@ -274,7 +304,7 @@ def backupstore_get_backup_volume_prefix(client, volume_name): return nfs_get_backup_volume_prefix(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def minio_get_backup_volume_prefix(volume_name): @@ -311,7 +341,7 @@ def backupstore_get_backup_cfg_file_path(client, volume_name, backup_name): 
return nfs_get_backup_cfg_file_path(client, volume_name, backup_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def minio_get_backup_cfg_file_path(volume_name, backup_name): @@ -334,7 +364,7 @@ def backupstore_get_volume_cfg_file_path(client, volume_name): return nfs_get_volume_cfg_file_path(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def nfs_get_volume_cfg_file_path(client, volume_name): @@ -357,7 +387,7 @@ def backupstore_get_backup_blocks_dir(client, volume_name): return nfs_get_backup_blocks_dir(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def minio_get_backup_blocks_dir(volume_name): @@ -383,7 +413,7 @@ def backupstore_create_file(client, core_api, file_path, data={}): return nfs_create_file_in_backupstore(file_path, data={}) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def mino_create_file_in_backupstore(client, core_api, file_path, data={}): # NOQA @@ -433,7 +463,7 @@ def backupstore_write_backup_cfg_file(client, core_api, volume_name, backup_name data) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def nfs_write_backup_cfg_file(client, volume_name, backup_name, data): @@ -481,7 +511,7 @@ def backupstore_delete_file(client, core_api, file_path): return nfs_delete_file_in_backupstore(file_path) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def mino_delete_file_in_backupstore(client, core_api, file_path): @@ -521,7 +551,7 @@ def backupstore_delete_backup_cfg_file(client, core_api, volume_name, backup_nam nfs_delete_backup_cfg_file(client, volume_name, backup_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def nfs_delete_backup_cfg_file(client, volume_name, backup_name): @@ -563,7 +593,7 @@ def backupstore_delete_volume_cfg_file(client, core_api, volume_name): # NOQA nfs_delete_volume_cfg_file(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def nfs_delete_volume_cfg_file(client, volume_name): @@ -632,7 +662,7 @@ def backupstore_delete_random_backup_block(client, core_api, volume_name): nfs_delete_random_backup_block(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def nfs_delete_random_backup_block(client, volume_name): @@ -681,7 +711,7 @@ def backupstore_count_backup_block_files(client, core_api, volume_name): return nfs_count_backup_block_files(client, volume_name) else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA def nfs_count_backup_block_files(client, volume_name): diff --git a/manager/integration/tests/common.py b/manager/integration/tests/common.py index daa02445a0..dfbda10f89 100644 --- a/manager/integration/tests/common.py +++ b/manager/integration/tests/common.py @@ -3900,6 +3900,14 @@ def is_backupTarget_nfs(s): return s.startswith("nfs://") +def is_backupTarget_cifs(s): + return s.startswith("cifs://") + + +def is_backupTarget_azurite(s): + return 
s.startswith("azblob://") + + def wait_for_backup_volume(client, vol_name, backing_image=""): for _ in range(RETRY_BACKUP_COUNTS): bv = client.by_id_backupVolume(vol_name) diff --git a/manager/integration/tests/test_basic.py b/manager/integration/tests/test_basic.py index 77851b0500..6c1f756072 100644 --- a/manager/integration/tests/test_basic.py +++ b/manager/integration/tests/test_basic.py @@ -673,6 +673,10 @@ def test_backup_block_deletion(set_random_backupstore, client, core_api, volume_ 17. Delete the backup volume 18. Cleanup the volume """ + backup_store_type = set_random_backupstore + if backup_store_type not in ["nfs", "s3"]: + pytest.skip("Skip test case because the backup store type is not supported") # NOQA + backupstore_cleanup(client) volume = create_and_check_volume(client, volume_name) @@ -1106,6 +1110,10 @@ def test_backup_volume_list(set_random_backupstore, client, core_api): # NOQA 11. delete backup volumes(1 & 2) 12. cleanup """ + backup_store_type = set_random_backupstore + if backup_store_type not in ["nfs", "s3"]: + pytest.skip("Skip test case because the backup store type is not supported") # NOQA + backupstore_cleanup(client) # create 2 volumes. @@ -1200,6 +1208,10 @@ def test_backup_metadata_deletion(set_random_backupstore, client, core_api, volu 18. verify that volume(1) has been deleted in the backupstore. 19. cleanup """ + backup_store_type = set_random_backupstore + if backup_store_type not in ["nfs", "s3"]: + pytest.skip("Skip test case because the backup store type is not supported") # NOQA + backupstore_cleanup(client) volume1_name = volume_name + "-1" @@ -4392,7 +4404,7 @@ def test_backuptarget_available_during_engine_image_not_ready(client, apps_api): url = backupstore cred_secret = "" else: - raise NotImplementedError + pytest.skip("Skip test case because the backup store type is not supported") # NOQA poll_intervals = ["0", "300"] for poll_interval in poll_intervals: diff --git a/manager/integration/tests/test_ha.py b/manager/integration/tests/test_ha.py index 7deafc060d..40747344d4 100644 --- a/manager/integration/tests/test_ha.py +++ b/manager/integration/tests/test_ha.py @@ -1409,6 +1409,10 @@ def test_all_replica_restore_failure(set_random_backupstore, client, core_api, v 15. Verify the faulted volume cannot be attached to a node. 16. Verify this faulted volume can be deleted. 
""" + backup_store_type = set_random_backupstore + if backup_store_type not in ["nfs", "s3"]: + pytest.skip("Skip test case because the backup store type is not supported") # NOQA + auto_salvage_setting = client.by_id_setting(SETTING_AUTO_SALVAGE) assert auto_salvage_setting.name == SETTING_AUTO_SALVAGE assert auto_salvage_setting.value == "true" diff --git a/pipelines/utilities/install_backupstores.sh b/pipelines/utilities/install_backupstores.sh index 7f043e8a8d..3355931f79 100755 --- a/pipelines/utilities/install_backupstores.sh +++ b/pipelines/utilities/install_backupstores.sh @@ -1,6 +1,34 @@ install_backupstores(){ - MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml" - NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml" + MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/minio-backupstore.yaml" + NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/nfs-backupstore.yaml" + CIFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml" + AZURITE_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml" kubectl create -f ${MINIO_BACKUPSTORE_URL} \ - -f ${NFS_BACKUPSTORE_URL} + -f ${NFS_BACKUPSTORE_URL} \ + -f ${CIFS_BACKUPSTORE_URL} \ + -f ${AZURITE_BACKUPSTORE_URL} + setup_azuitize_backup_store +} + +setup_azuitize_backup_store(){ + RETRY=0 + MAX_RETRY=60 + until (kubectl get pods | grep 'longhorn-test-azblob' | grep 'Running'); do + echo 'Waiting azurite pod running' + sleep 5 + if [ $RETRY -eq $MAX_RETRY ]; then + break + fi + RETRY=$((RETRY+1)) + done + + AZBLOB_ENDPOINT=$(echo -n "http://$(kubectl get svc azblob-service -o jsonpath='{.spec.clusterIP}'):10000/" | base64) + kubectl -n longhorn-system patch secret azblob-secret \ + --type=json \ + -p="[{'op': 'replace', 'path': '/data/AZBLOB_ENDPOINT', 'value': \"${AZBLOB_ENDPOINT}\"}]" + + CONTROL_PLANE_PUBLIC_IP=$(cat /tmp/controlplane_public_ip) + # port forward and az container create need to be run on control node + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "nohup kubectl port-forward --address 0.0.0.0 service/azblob-service 20001:10000 > /dev/null 2>&1 &" + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "az storage container create -n longhorn-test-azurite --connection-string 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://0.0.0.0:20001/devstoreaccount1;'" } \ No newline at end of file diff --git a/pipelines/utilities/run_longhorn_e2e_test.sh b/pipelines/utilities/run_longhorn_e2e_test.sh index 8cd6606fe8..9e09a90d80 100755 --- a/pipelines/utilities/run_longhorn_e2e_test.sh +++ b/pipelines/utilities/run_longhorn_e2e_test.sh @@ -1,5 +1,7 @@ S3_BACKUP_STORE='s3://backupbucket@us-east-1/backupstore$minio-secret' NFS_BACKUP_STORE='nfs://longhorn-test-nfs-svc.default:/opt/backupstore' +CIFS_BACKUP_STORE='cifs://longhorn-test-cifs-svc.default/backupstore$cifs-secret' +AZURITE_BACKUP_STORE='azblob://longhorn-test-azurite@core.windows.net/$azblob-secret' run_longhorn_e2e_test(){ @@ -22,6 +24,10 @@ run_longhorn_e2e_test(){ yq e -i 'select(.spec.containers[0] != 
null).spec.containers[0].env[1].value="'${S3_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${NFS_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${CIFS_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${AZURITE_BACKUP_STORE}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi if [[ "${TF_VAR_use_hdd}" == true ]]; then @@ -76,6 +82,10 @@ run_longhorn_e2e_test_out_of_cluster(){ LONGHORN_BACKUPSTORES=${S3_BACKUP_STORE} elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then LONGHORN_BACKUPSTORES=${NFS_BACKUP_STORE} + elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then + LONGHORN_BACKUPSTORES=${CIFS_BACKUP_STORE} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + LONGHORN_BACKUPSTORES=${AZURITE_BACKUP_STORE} fi LONGHORN_BACKUPSTORE_POLL_INTERVAL="30" diff --git a/pipelines/utilities/run_longhorn_test.sh b/pipelines/utilities/run_longhorn_test.sh index 7ef17dfee9..26fd8f93e8 100755 --- a/pipelines/utilities/run_longhorn_test.sh +++ b/pipelines/utilities/run_longhorn_test.sh @@ -24,6 +24,12 @@ run_longhorn_test(){ elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi if [[ "${TF_VAR_use_hdd}" == true ]]; then @@ -107,6 +113,12 @@ run_longhorn_upgrade_test(){ elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' 
${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} fi yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[4].value="'${LONGHORN_UPGRADE_TYPE}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} diff --git a/test_framework/scripts/longhorn-setup.sh b/test_framework/scripts/longhorn-setup.sh index 3356e4b26f..b091c3a9b6 100755 --- a/test_framework/scripts/longhorn-setup.sh +++ b/test_framework/scripts/longhorn-setup.sh @@ -332,12 +332,39 @@ create_longhorn_namespace(){ install_backupstores(){ - MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/minio-backupstore.yaml" - NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/nfs-backupstore.yaml" + MINIO_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/minio-backupstore.yaml" + NFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn-tests/master/manager/integration/deploy/backupstores/nfs-backupstore.yaml" + CIFS_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/cifs-backupstore.yaml" + AZURITE_BACKUPSTORE_URL="https://raw.githubusercontent.com/longhorn/longhorn/master/deploy/backupstores/azurite-backupstore.yaml" kubectl create -f ${MINIO_BACKUPSTORE_URL} \ - -f ${NFS_BACKUPSTORE_URL} + -f ${NFS_BACKUPSTORE_URL} \ + -f ${CIFS_BACKUPSTORE_URL} \ + -f ${AZURITE_BACKUPSTORE_URL} + setup_azuitize_backup_store } +setup_azuitize_backup_store(){ + RETRY=0 + MAX_RETRY=60 + until (kubectl get pods | grep 'longhorn-test-azblob' | grep 'Running'); do + echo 'Waiting azurite pod running' + sleep 5 + if [ $RETRY -eq $MAX_RETRY ]; then + break + fi + RETRY=$((RETRY+1)) + done + + AZBLOB_ENDPOINT=$(echo -n "http://$(kubectl get svc azblob-service -o jsonpath='{.spec.clusterIP}'):10000/" | base64) + kubectl -n longhorn-system patch secret azblob-secret \ + --type=json \ + -p="[{'op': 'replace', 'path': '/data/AZBLOB_ENDPOINT', 'value': \"${AZBLOB_ENDPOINT}\"}]" + + CONTROL_PLANE_PUBLIC_IP=$(cat /tmp/controlplane_public_ip) + # port forward and az container create need to be run on control node + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "nohup kubectl port-forward --address 0.0.0.0 service/azblob-service 20001:10000 > /dev/null 2>&1 &" + ssh ec2-user@${CONTROL_PLANE_PUBLIC_IP} "az storage container create -n longhorn-test-azurite --connection-string 'DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://0.0.0.0:20001/devstoreaccount1;'" +} create_aws_secret(){ AWS_ACCESS_KEY_ID_BASE64=`echo -n "${TF_VAR_lh_aws_access_key}" | base64` @@ -397,6 +424,12 @@ run_longhorn_upgrade_test(){ elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' 
${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} fi yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[4].value="'${LONGHORN_UPGRADE_TYPE}'"' ${LONGHORN_UPGRADE_TESTS_MANIFEST_FILE_PATH} @@ -451,6 +484,12 @@ run_longhorn_tests(){ elif [[ $BACKUP_STORE_TYPE = "nfs" ]]; then BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $2}' | sed 's/ *//'` yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "cifs" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $3}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} + elif [[ $BACKUP_STORE_TYPE = "azurite" ]]; then + BACKUP_STORE_FOR_TEST=`yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} | awk -F ',' '{print $4}' | sed 's/ *//'` + yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'${BACKUP_STORE_FOR_TEST}'"' ${LONGHORN_TESTS_MANIFEST_FILE_PATH} fi if [[ "${TF_VAR_use_hdd}" == true ]]; then diff --git a/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl index fe80f7f8a0..8c4c5c2d9b 100755 --- a/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/centos/user-data-scripts/provision_k3s_agent.sh.tpl @@ -14,7 +14,7 @@ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' / sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl index dabe432468..9a1628ff0e 100755 --- a/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/centos/user-data-scripts/provision_rke2_agent.sh.tpl @@ -14,7 +14,7 @@ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' / sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools nc +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools nc samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git 
a/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl index 821f2b4723..a83a51e404 100755 --- a/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/oracle/user-data-scripts/provision_k3s_agent.sh.tpl @@ -2,7 +2,7 @@ sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid # disable nm-cloud-setup otherwise k3s-agent service won’t start. diff --git a/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl index 19f5b99fc2..6e681cb0ac 100644 --- a/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/oracle/user-data-scripts/provision_rke2_agent.sh.tpl @@ -2,7 +2,7 @@ sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc samba sudo systemctl -q enable iscsid sudo systemctl start iscsid sudo systemctl disable nm-cloud-setup.service nm-cloud-setup.timer diff --git a/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl index 45c9e8580a..a1a8b75d56 100755 --- a/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/rhel/user-data-scripts/provision_k3s_agent.sh.tpl @@ -10,7 +10,7 @@ fi sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid sudo systemctl disable nm-cloud-setup.service nm-cloud-setup.timer diff --git a/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl index 5c3cebefd9..475a243abd 100755 --- a/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/rhel/user-data-scripts/provision_rke2_agent.sh.tpl @@ -10,7 +10,7 @@ fi sudo yum update -y sudo yum group install -y "Development Tools" -sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc +sudo yum install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper nc samba sudo systemctl -q enable iscsid sudo systemctl start iscsid sudo systemctl disable nm-cloud-setup.service nm-cloud-setup.timer diff --git a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl index e5ea498944..a6e8aeb1ca 100755 --- a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_k3s_agent.sh.tpl 
@@ -10,7 +10,7 @@ fi # Do not arbitrarily run "dnf update", as this will effectively move us up to the latest minor release. sudo dnf group install -y "Development Tools" -sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper +sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl index d2c2a65f70..6b9732ed14 100755 --- a/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/rockylinux/user-data-scripts/provision_rke2_agent.sh.tpl @@ -10,7 +10,7 @@ fi # Do not arbitrarily run "dnf update", as this will effectively move us up to the latest minor release. sudo dnf group install -y "Development Tools" -sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper jq nmap-ncat +sudo dnf install -y iscsi-initiator-utils nfs-utils nfs4-acl-tools cryptsetup device-mapper jq nmap-ncat samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_agent.sh.tpl index d888d29949..579c6565c1 100755 --- a/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_agent.sh.tpl @@ -5,7 +5,7 @@ set -e sudo systemctl restart guestregister # Sometimes registration fails on first boot. sudo zypper ref sudo zypper install -y -t pattern devel_basis -sudo zypper install -y open-iscsi nfs-client cryptsetup device-mapper +sudo zypper install -y open-iscsi nfs-client cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl b/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl index 2a2df03018..3057431068 100755 --- a/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl +++ b/test_framework/terraform/aws/sles/user-data-scripts/provision_k3s_server.sh.tpl @@ -5,7 +5,7 @@ set -e sudo systemctl restart guestregister # Sometimes registration fails on first boot. sudo zypper ref sudo zypper install -y -t pattern devel_basis -sudo zypper install -y open-iscsi nfs-client jq +sudo zypper install -y open-iscsi nfs-client jq azure-cli sudo systemctl -q enable iscsid sudo systemctl start iscsid @@ -27,4 +27,4 @@ done if [[ -n "${custom_ssh_public_key}" ]]; then echo "${custom_ssh_public_key}" >> /home/ec2-user/.ssh/authorized_keys -fi +fi \ No newline at end of file diff --git a/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_agent.sh.tpl index f1c8755125..5e43b9749b 100755 --- a/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_agent.sh.tpl @@ -5,7 +5,7 @@ set -e sudo systemctl restart guestregister # Sometimes registration fails on first boot. 
sudo zypper ref sudo zypper install -y -t pattern devel_basis -sudo zypper install -y open-iscsi nfs-client cryptsetup device-mapper +sudo zypper install -y open-iscsi nfs-client cryptsetup device-mapper samba sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl b/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl index 6bf855bc44..3f213525aa 100755 --- a/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl +++ b/test_framework/terraform/aws/sles/user-data-scripts/provision_rke2_server.sh.tpl @@ -5,7 +5,7 @@ set -e sudo systemctl restart guestregister # Sometimes registration fails on first boot. sudo zypper ref sudo zypper install -y -t pattern devel_basis -sudo zypper install -y open-iscsi nfs-client jq +sudo zypper install -y open-iscsi nfs-client jq azure-cli sudo systemctl -q enable iscsid sudo systemctl start iscsid diff --git a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl index cb13a443c8..0366fae36d 100755 --- a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl @@ -1,7 +1,7 @@ #!/bin/bash apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic diff --git a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl index 5de3d402aa..642485c5ef 100755 --- a/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/aws/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl @@ -1,7 +1,7 @@ #!/bin/bash apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic diff --git a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl index 2bb4fe66d2..2e0832903e 100644 --- a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl +++ b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_k3s_agent.sh.tpl @@ -4,7 +4,7 @@ set -e set -x apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe uio modprobe uio_pci_generic diff --git a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl index 39da2ad640..f3bb124b88 100755 --- a/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl +++ b/test_framework/terraform/equinix/ubuntu/user-data-scripts/provision_rke2_agent.sh.tpl @@ -4,7 +4,7 @@ set -e set -x apt-get update -apt-get install -y nfs-common cryptsetup dmsetup linux-modules-extra-`uname -r` +apt-get install -y nfs-common cryptsetup dmsetup samba linux-modules-extra-`uname -r` modprobe 
uio modprobe uio_pci_generic
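One note on the new `cifs`/`azurite` branches in the pipeline scripts: the backup target is chosen by its field position in the comma-separated `LONGHORN_BACKUPSTORES` value from `manager/integration/deploy/test.yaml` (s3, nfs, cifs, azblob in that order), so the `awk` field numbers must stay in sync with that ordering. The following is an illustrative condensation of that selection logic, not the literal scripts.
```
# Illustrative condensation of the BACKUP_STORE_TYPE handling repeated in
# run_longhorn_test.sh and longhorn-setup.sh; field numbers follow the order of
# LONGHORN_BACKUPSTORES in manager/integration/deploy/test.yaml.
case "${BACKUP_STORE_TYPE}" in
  s3)      FIELD=1 ;;
  nfs)     FIELD=2 ;;
  cifs)    FIELD=3 ;;
  azurite) FIELD=4 ;;
esac
BACKUP_STORE_FOR_TEST=$(yq e 'select(.spec.containers[0] != null).spec.containers[0].env[1].value' \
  "${LONGHORN_TESTS_MANIFEST_FILE_PATH}" | awk -F ',' -v f="${FIELD}" '{print $f}' | sed 's/ *//')
yq e -i 'select(.spec.containers[0] != null).spec.containers[0].env[1].value="'"${BACKUP_STORE_FOR_TEST}"'"' \
  "${LONGHORN_TESTS_MANIFEST_FILE_PATH}"
```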